diff --git a/.dockerignore b/.dockerignore
index b20cfb8..4f72fd2 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,3 +1,4 @@
jupyterhub
values.yaml
config.yml
+config-generator
diff --git a/.gitignore b/.gitignore
index 5c29fa9..e239487 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,5 @@
*.pyc
__pycache__
-jupyterhub
values.yaml
*.egg-info
build
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2172a56..f1de4c9 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -27,7 +27,7 @@ repos:
args:
- --max-line-length=88
- --max-doc-length=90
- - --ignore=E203,W503,W505
+ - --ignore=E203,W503,W505,F821,E302,E402
- repo: https://github.com/psf/black
rev: 22.3.0
hooks:
diff --git a/application_hub_context/app_hub_context.py b/application_hub_context/app_hub_context.py
index 3ebff7b..f805fec 100644
--- a/application_hub_context/app_hub_context.py
+++ b/application_hub_context/app_hub_context.py
@@ -1,10 +1,12 @@
import os
import time
+import yaml
from abc import ABC
from http import HTTPStatus
from typing import Dict, TextIO
from kubernetes import client, config
+from kubernetes.utils import create_from_dict
from kubernetes.client import Configuration
from kubernetes.client.rest import ApiException
from kubernetes.config.config_exception import ConfigException
@@ -33,9 +35,9 @@ def __init__(
self.api_client = self._get_api_client(self.kubeconfig_file)
self.core_v1_api = self._get_core_v1_api()
self.batch_v1_api = self._get_batch_v1_api()
+ self.apps_v1_api = self._get_apps_v1_api()
self.rbac_authorization_v1_api = self._get_rbac_authorization_v1_api()
self.namespace = namespace
-
self.spawner = spawner
# get the groups the user belongs to
self.user_groups = [group.name for group in self.spawner.user.groups]
@@ -122,6 +124,9 @@ def _get_batch_v1_api(self) -> client.BatchV1Api:
def _get_rbac_authorization_v1_api(self) -> client.RbacAuthorizationApi:
return client.RbacAuthorizationV1Api(self.api_client)
+ def _get_apps_v1_api(self) -> client.AppsV1Api:
+ return client.AppsV1Api(self.api_client)
+
def is_object_created(self, read_method, **kwargs):
read_methods = {}
@@ -195,6 +200,35 @@ def initialise(self):
def dispose(self):
pass
+ def create_namespace(
+ self, labels: dict = None, annotations: dict = None
+ ) -> client.V1Namespace:
+
+ if self.is_namespace_created():
+ self.spawner.log.info(
+ f"namespace {self.namespace} exists, skipping creation"
+ )
+ return self.core_v1_api.read_namespace(name=self.namespace)
+
+ self.spawner.log.info(f"creating namespace {self.namespace}")
+ try:
+ body = client.V1Namespace(
+ metadata=client.V1ObjectMeta(
+ name=self.namespace, labels=labels, annotations=annotations
+ ) # noqa: E501
+ )
+ response = self.core_v1_api.create_namespace(
+ body=body, async_req=False
+ ) # noqa: E501
+
+ if not self.retry(self.is_namespace_created):
+ raise ApiException(http_resp=response)
+ self.spawner.log.info(f"namespace {self.namespace} created")
+ return response
+ except ApiException as e:
+ self.spawner.log.error(f"namespace {self.namespace} creation failed, {e}\n")
+ raise e
+
@staticmethod
def retry(fun, max_tries=10, interval=1, **kwargs):
for i in range(max_tries):
@@ -390,7 +424,6 @@ def create_role(
resources: list[str] = [""],
api_groups: list[str] = ["*"],
):
-
if self.is_role_created(name=name):
return self.rbac_authorization_v1_api.read_namespaced_role(
name=name, namespace=self.namespace
@@ -424,9 +457,7 @@ def create_role(
raise e
def create_image_pull_secret(self, name: str, data):
-
if self.is_image_pull_secret_created(name=name):
-
return self.core_v1_api.read_namespaced_secret(
namespace=self.namespace, name=name
) # noqa: E501
@@ -500,6 +531,48 @@ def patch_service_account(self, secret_name: str):
except ApiException as e:
raise e
+    # applies a single manifest (a parsed YAML document as a dict),
+    # similar to `kubectl create -f` for one document
+    def apply_manifest(self, manifest):
+
+ create_from_dict(
+ k8s_client=self.api_client,
+ data=manifest,
+ verbose=True,
+ namespace=self.namespace,
+ )
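+        # usage sketch (hypothetical single-document manifest):
+        # doc = yaml.safe_load("apiVersion: v1\nkind: Pod\nmetadata:\n  name: demo")
+        # self.apply_manifest(doc)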
+
+ def unapply_manifests(self, manifest_content):
+
+ manifests = yaml.safe_load_all(manifest_content)
+
+        for k8_object in manifests:
+            # skip empty YAML documents
+            if not k8_object:
+                continue
+
+            kind = k8_object.get("kind")
+            metadata = k8_object.get("metadata", {})
+            namespace = metadata.get("namespace", self.namespace)
+            name = metadata.get("name")
+
+            if not kind or not name:
+                continue
+
+            self.spawner.log.info(f"Deleting {kind} {name}")
+
+ try:
+ if kind == "Deployment":
+ self.apps_v1_api.delete_namespaced_deployment(name, namespace)
+ elif kind == "Service":
+ self.core_v1_api.delete_namespaced_service(name, namespace)
+ elif kind == "Job":
+ self.batch_v1_api.delete_namespaced_job(name, namespace)
+ elif kind == "Pod":
+ self.core_v1_api.delete_namespaced_pod(name, namespace)
+ # Add other kinds as needed
+ else:
+ self.spawner.log.error(f"Unsupported kind: {kind}")
+ except client.exceptions.ApiException as e:
+ self.spawner.log.error(f"An error occurred: {e}")
+
class DefaultApplicationHubContext(ApplicationHubContext):
def get_profile_list(self):
@@ -576,6 +649,11 @@ def initialise(self):
# process the config maps
config_maps = self.config_parser.get_profile_config_maps(profile_id=profile_id)
+ # check the namespace
+ if not self.is_namespace_created():
+ self.spawner.log.info(f"Creating namespace {self.namespace}")
+ self.create_namespace()
+
if config_maps:
for config_map in config_maps:
try:
@@ -588,25 +666,34 @@ def initialise(self):
annotations=None,
labels=None,
)
- self.spawner.log.info(f"Mounting configmap {config_map.name}")
- self.spawner.volume_mounts.extend(
- [
- {
- "name": config_map.name,
- "mountPath": config_map.mount_path,
- "subPath": config_map.key,
- },
- ]
- )
+ if config_map.mount_path is not None:
- self.spawner.volumes.extend(
- [
- {
- "name": config_map.name,
- "configMap": {"name": config_map.key},
- }
- ]
- )
+ self.spawner.log.info(f"Mounting configmap {config_map.name}")
+ self.spawner.volume_mounts.extend(
+ [
+ {
+ "name": config_map.name,
+ "mountPath": config_map.mount_path,
+ "subPath": config_map.key,
+ },
+ ]
+ )
+                        default_mode = (
+                            int(config_map.default_mode, 8)
+                            if config_map.default_mode
+                            else 0o644
+                        )
+                        self.spawner.volumes.extend(
+                            [
+                                {
+                                    "name": config_map.name,
+                                    "configMap": {
+                                        "name": config_map.key,
+                                        "defaultMode": default_mode,
+                                    },
+                                }
+                            ]
+                        )
+                        self.spawner.log.info(
+                            f"Mounted configmap {config_map.name} (key {config_map.key}) mode {oct(default_mode)}"  # noqa: E501
+                        )
except Exception as err:
self.spawner.log.error(f"Unexpected {err=}, {type(err)=}")
self.spawner.log.error(
@@ -668,7 +755,6 @@ def initialise(self):
try:
# checking if role binding is already created
if not self.is_role_binding_created(name=role_binding.name):
-
# checking if role is already created
if not self.is_role_created(name=role_binding.role.name):
self.spawner.log.info(
@@ -755,6 +841,26 @@ def initialise(self):
f"Skipping creation of init container {init_container.name}"
)
+ # process the manifests
+ manifests = self.config_parser.get_profile_manifests(profile_id=profile_id)
+
+ if manifests:
+ for manifest in manifests:
+ self.spawner.log.info(f"Apply manifest {manifest.name}")
+
+ try:
+ ms = yaml.safe_load_all(manifest.content)
+ for k8_object in ms:
+ self.spawner.log.info(
+ f"Apply manifest kind {k8_object['kind']}"
+ )
+ self.apply_manifest(k8_object)
+ except Exception as err:
+ self.spawner.log.error(f"Unexpected {err}, {type(err)}")
+ self.spawner.log.error(
+ f"Skipping creation of manifest {manifest.name}"
+ )
+
def dispose(self):
profile_id = self.config_parser.get_profile_by_slug(slug=self.profile_slug).id
@@ -794,6 +900,14 @@ def dispose(self):
self.spawner.log.info(f"Dispose role binding {role_binding.name}")
self.delete_role_binding(role_binding=role_binding)
+        # process the manifests
+        manifests = self.config_parser.get_profile_manifests(profile_id=profile_id)
+        if manifests:
+            for manifest in manifests:
+                self.spawner.log.info(f"Un-apply manifest {manifest.name}")
+                self.unapply_manifests(manifest_content=manifest.content)
+
# deal with the image pull secrets
image_pull_secrets = self.config_parser.get_profile_image_pull_secrets(
profile_id=profile_id
@@ -814,7 +928,6 @@ def dispose(self):
)
for elem in service_account_body.image_pull_secrets:
if elem.name == image_pull_secret.name:
-
service_account_body.image_pull_secrets.remove(
{"name": elem.name}
)
diff --git a/application_hub_context/models.py b/application_hub_context/models.py
index ec03106..51fa401 100644
--- a/application_hub_context/models.py
+++ b/application_hub_context/models.py
@@ -37,6 +37,13 @@ class defining a volume object:
persist: bool
+class Manifest(BaseModel):
+ name: str
+ key: str
+ content: Optional[str] = None
+ persist: Optional[bool] = True
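+
+# A hypothetical `manifests` entry in a profile of the hub config.yml:
+#
+# manifests:
+#   - name: a-service
+#     key: a-service
+#     persist: false
+#     content: |
+#       apiVersion: v1
+#       kind: Service
+#       metadata:
+#         name: a-service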
+
+
class ConfigMap(BaseModel):
"""
@@ -50,8 +57,8 @@ class ConfigMap(BaseModel):
name: str
key: str
- mount_path: str
- default_mode: Optional[str]
+ mount_path: Optional[str] = None
+ default_mode: Optional[str] = None
readonly: bool
content: Optional[str] = None
persist: Optional[bool] = True
@@ -67,6 +74,10 @@ class KubespawnerOverride(BaseModel):
extra_resource_guarantees: Optional[dict] = {}
+class InitContainerVolumeMount(VolumeMount):
+ sub_path: str
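+
+    # e.g. mounting a single ConfigMap key as a file (as done by the init
+    # container in config-generator/config-generator.ipynb):
+    # InitContainerVolumeMount(
+    #     name="init", mount_path="/opt/init/.init.sh", sub_path="init"
+    # )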
+
+
class InitContainer(BaseModel):
name: str
image: str
@@ -165,6 +176,7 @@ class Profile(BaseModel):
role_bindings: Optional[List[RoleBinding]] = None
image_pull_secrets: Optional[List[ImagePullSecret]] = None
init_containers: Optional[List[InitContainer]] = None
+ manifests: Optional[List[Manifest]] = None
class Config(BaseModel):
diff --git a/application_hub_context/parser.py b/application_hub_context/parser.py
index 8ed1e44..6364a6b 100644
--- a/application_hub_context/parser.py
+++ b/application_hub_context/parser.py
@@ -124,3 +124,10 @@ def get_profile_image_pull_secrets(self, profile_id):
def get_profile_init_containers(self, profile_id):
"""returns the image pull secrets"""
return self.get_profile_by_id(profile_id=profile_id).init_containers
+
+ def get_profile_manifests(self, profile_id):
+ """returns the profile manifests"""
+ try:
+ return self.get_profile_by_id(profile_id=profile_id).manifests
+        except AttributeError:
+            # get_profile_by_id may return None for an unknown profile id
+            return None
diff --git a/config-generator/config-generator.ipynb b/config-generator/config-generator.ipynb
new file mode 100644
index 0000000..a6d71d7
--- /dev/null
+++ b/config-generator/config-generator.ipynb
@@ -0,0 +1,546 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import yaml\n",
+ "from models import *\n",
+ "import os"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Configuration\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "storage_class_rwo = \"standard\"\n",
+ "storage_class_rwx = \"standard\"\n",
+ "\n",
+ "workspace_volume_size = \"50Gi\"\n",
+ "calrissian_volume_size = \"50Gi\"\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Volumes\n",
+ "\n",
+ "Create the Volumes"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Workspace Volume\n",
+ "\n",
+ "The workspace volume is persisted."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "workspace_volume = Volume(\n",
+ " name=\"workspace-volume\",\n",
+ " size=workspace_volume_size,\n",
+ " claim_name=\"workspace-claim\",\n",
+ " mount_path=\"/workspace\",\n",
+ " storage_class=storage_class_rwo,\n",
+ " access_modes=[\"ReadWriteOnce\"],\n",
+ " volume_mount=VolumeMount(name=\"workspace-volume\", mount_path=\"/workspace\"),\n",
+ " persist=True,\n",
+ ")"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Calrissian Volume\n",
+ "\n",
+ "This is a RWX volume, not persisted"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "calrissian_volume = Volume(\n",
+ " name=\"calrissian-volume\",\n",
+ " claim_name=\"calrissian-claim\",\n",
+ " size=calrissian_volume_size,\n",
+ " storage_class=storage_class_rwx,\n",
+ " access_modes=[\"ReadWriteMany\"],\n",
+ " volume_mount=VolumeMount(name=\"calrissian-volume\", mount_path=\"/calrissian\"),\n",
+ " persist=False,\n",
+ ")"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## ConfigMaps\n",
+ "\n",
+ "These configmaps are mounted as files on the pod."
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### bash login\n",
+ "\n",
+ "This file is used for the JupyterLab Terminal configuration"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "with open(\"config-maps/bash-login\", \"r\") as f:\n",
+ " content = f.read()\n",
+ "\n",
+ "bash_login_cm = ConfigMap(\n",
+ " name=\"bash-login\",\n",
+ " key=\"bash-login\",\n",
+ " content=content,\n",
+ " readonly=True,\n",
+ " persist=True,\n",
+ " mount_path=\"/workspace/.bash_login\",\n",
+ ")"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### bash.rc\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "with open(\"config-maps/bash-rc\", \"r\") as f:\n",
+ " content = f.read()\n",
+ "bash_rc_cm = ConfigMap(\n",
+ " name=\"bash-rc\",\n",
+ " key=\"bash-rc\",\n",
+ " content=content,\n",
+ " readonly=True,\n",
+ " persist=True,\n",
+ " mount_path=\"/workspace/.bashrc\",\n",
+ ")"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Profiles"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "profiles = []"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Coder"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "coders = {\n",
+ " \"coder1\": {\n",
+ " \"display_name\": \"Code Server Small\",\n",
+ " \"slug\": \"ellip_studio_coder_slug_s\",\n",
+ " \"cpu_limit\": 2,\n",
+ " \"mem_limit\": \"8G\",\n",
+ " },\n",
+ " \"coder2\": {\n",
+ " \"display_name\": \"Code Server Medium\",\n",
+ " \"slug\": \"ellip_studio_coder_slug_m\",\n",
+ " \"cpu_limit\": 4,\n",
+ " \"mem_limit\": \"12G\",\n",
+ " },\n",
+ "}\n",
+ "\n",
+ "for key, value in coders.items():\n",
+ " coder_definition = ProfileDefinition(\n",
+ " display_name=value[\"display_name\"],\n",
+ " slug=value[\"slug\"],\n",
+ " default=False,\n",
+ " kubespawner_override=KubespawnerOverride(\n",
+ " cpu_limit=value[\"cpu_limit\"],\n",
+ " mem_limit=value[\"mem_limit\"],\n",
+ " image=\"eoepca/pde-code-server:develop\",\n",
+ " ),\n",
+ " )\n",
+ "\n",
+ " coder_profile = Profile(\n",
+ " id=f\"profile_studio_{key}\",\n",
+ " groups=[\"group-a\", \"group-b\"],\n",
+ " definition=coder_definition,\n",
+ " node_selector={},\n",
+ " volumes=[calrissian_volume, workspace_volume],\n",
+ " config_maps=[\n",
+ " bash_rc_cm,\n",
+ " ],\n",
+ " pod_env_vars={\n",
+ " \"HOME\": \"/workspace\",\n",
+ " \"CONDA_ENVS_PATH\": \" /workspace/.envs\",\n",
+ " },\n",
+ " )\n",
+ "\n",
+ " profiles.append(coder_profile)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## init.sh script"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "with open(\"./config-maps/init.sh\", \"r\") as f:\n",
+ " content = f.read()\n",
+ "\n",
+ "init_cm = ConfigMap(\n",
+ " name=\"init\",\n",
+ " key=\"init\",\n",
+ " content=content,\n",
+ " readonly=True,\n",
+ " persist=False,\n",
+ " mount_path=\"/opt/init/.init.sh\",\n",
+ " default_mode=\"0660\",\n",
+ ")\n",
+ "\n",
+ "\n",
+ "init_context_volume_mount = InitContainerVolumeMount(\n",
+ " mount_path=\"/opt/init/.init.sh\", name=\"init\", sub_path=\"init\"\n",
+ ")\n",
+ "init_container = InitContainer(\n",
+ " name=\"init-file-on-volume\",\n",
+ " image=\"eoepca/pde-code-server:develop\",\n",
+ " command=[\"sh\", \"-c\", \"sh /opt/init/.init.sh\"],\n",
+ " volume_mounts=[\n",
+ " VolumeMount(name=\"workspace-volume\", mount_path=\"/workspace\"),\n",
+ " init_context_volume_mount,\n",
+ " ],\n",
+ ")\n",
+ "\n",
+ "eoepca_demo_init_script_profile = Profile(\n",
+ " id=f\"profile_demo_init_script\",\n",
+ " groups=[\"group-a\", \"group-b\"],\n",
+ " definition=ProfileDefinition(\n",
+ " display_name=\"Coder demo init script\",\n",
+ " description=\"This profile is used to demonstrate the use of an init script\",\n",
+ " slug=\"eoepca_demo_init_script\",\n",
+ " default=False,\n",
+ " kubespawner_override=KubespawnerOverride(\n",
+ " cpu_guarantee=1,\n",
+ " cpu_limit=2,\n",
+ " mem_guarantee=\"4G\",\n",
+ " mem_limit=\"6G\",\n",
+ " image=\"eoepca/pde-code-server:develop\",\n",
+ " ),\n",
+ " ),\n",
+ " node_selector={},\n",
+ " volumes=[calrissian_volume, workspace_volume],\n",
+ " config_maps=[init_cm],\n",
+ " pod_env_vars={\n",
+ " \"HOME\": \"/workspace\",\n",
+ " \"CONDA_ENVS_PATH\": \"/workspace/.envs\",\n",
+ " \"CONDARC\": \"/workspace/.condarc\",\n",
+ " \"XDG_RUNTIME_DIR\": \"/workspace/.local\",\n",
+ " \"CODE_SERVER_WS\": \"/workspace/mastering-app-package\",\n",
+ " },\n",
+ " init_containers=[init_container],\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "profiles.append(eoepca_demo_init_script_profile)\n",
+ "\n",
+ "eoepca_demo_init_script_profile = Profile(\n",
+ " id=f\"profile_demo_init_script\",\n",
+ " groups=[\"group-a\", \"group-b\"],\n",
+ " definition=ProfileDefinition(\n",
+ " display_name=\"Coder demo init script\",\n",
+ " description=\"This profile is used to demonstrate the use of an init script\",\n",
+ " slug=\"eoepca_demo_init_script\",\n",
+ " default=False,\n",
+ " kubespawner_override=KubespawnerOverride(\n",
+ " cpu_guarantee=1,\n",
+ " cpu_limit=2,\n",
+ " mem_guarantee=\"4G\",\n",
+ " mem_limit=\"6G\",\n",
+ " image=\"eoepca/pde-code-server:develop\",\n",
+ " ),\n",
+ " ),\n",
+ " node_selector={},\n",
+ " volumes=[calrissian_volume, workspace_volume],\n",
+ " config_maps=[init_cm],\n",
+ " pod_env_vars={\n",
+ " \"HOME\": \"/workspace\",\n",
+ " \"CONDA_ENVS_PATH\": \"/workspace/.envs\",\n",
+ " \"CONDARC\": \"/workspace/.condarc\",\n",
+ " \"XDG_RUNTIME_DIR\": \"/workspace/.local\",\n",
+ " \"CODE_SERVER_WS\": \"/workspace/mastering-app-package\",\n",
+ " },\n",
+ " init_containers=[init_container],\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## JupyterLab"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "image = \"jupyter/scipy-notebook\"\n",
+ "\n",
+ "\n",
+ "eoepca_jupyter_lab_profile = Profile(\n",
+ " id=\"profile_jupyter_lab\",\n",
+ " groups=[\"group-c\"],\n",
+ " definition=ProfileDefinition(\n",
+ " display_name=\"Jupyter Lab\",\n",
+ " description=\"Jupyter Lab with Python 3.11\",\n",
+ " slug=\"eoepca_jupyter_lab\",\n",
+ " default=False,\n",
+ " kubespawner_override=KubespawnerOverride(\n",
+ " cpu_guarantee=1,\n",
+ " cpu_limit=2,\n",
+ " mem_guarantee=\"4G\",\n",
+ " mem_limit=\"6G\",\n",
+ " image=image,\n",
+ " ),\n",
+ " ),\n",
+ " node_selector={},\n",
+ " volumes=[workspace_volume],\n",
+ " config_maps=[],\n",
+ " pod_env_vars={\n",
+ " \"HOME\": \"/workspace\",\n",
+ " \"XDG_RUNTIME_DIR\": \"/workspace/.local\",\n",
+ " \"XDG_CONFIG_HOME\": \"/workspace/.config\",\n",
+ " },\n",
+ ")\n",
+ "\n",
+ "profiles.append(eoepca_jupyter_lab_profile)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Image pull secret\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "image_pull_secret = ImagePullSecret(\n",
+ " name=\"cr-config\",\n",
+ " persist=False,\n",
+ " data=\"\",\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "image = \"jupyter/scipy-notebook\"\n",
+ "\n",
+ "\n",
+ "eoepca_jupyter_lab_profile_2 = Profile(\n",
+ " id=\"profile_jupyter_lab_2\",\n",
+ " groups=[\"group-c\"],\n",
+ " definition=ProfileDefinition(\n",
+ " display_name=\"Jupyter Lab - profile 2\",\n",
+ " description=\"Jupyter Lab with Python 3.11 - demoes the use of an image pull secret\",\n",
+ " slug=\"eoepca_jupyter_lab_2\",\n",
+ " default=False,\n",
+ " kubespawner_override=KubespawnerOverride(\n",
+ " cpu_guarantee=1,\n",
+ " cpu_limit=2,\n",
+ " mem_guarantee=\"4G\",\n",
+ " mem_limit=\"6G\",\n",
+ " image=image,\n",
+ " ),\n",
+ " ),\n",
+ " node_selector={},\n",
+ " volumes=[workspace_volume],\n",
+ " config_maps=[],\n",
+ " pod_env_vars={\n",
+ " \"HOME\": \"/workspace\",\n",
+ " \"XDG_RUNTIME_DIR\": \"/workspace/.local\",\n",
+ " \"XDG_CONFIG_HOME\": \"/workspace/.config\",\n",
+ " },\n",
+ " image_pull_secrets=[image_pull_secret],\n",
+ ")\n",
+ "\n",
+ "profiles.append(eoepca_jupyter_lab_profile_2)"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Configuration"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "config = Config(\n",
+ " profiles=profiles\n",
+ ")\n",
+ "\n",
+ "with open(\n",
+ " \"../jupyterhub/files/hub/config.yml\", \"w\"\n",
+ ") as file:\n",
+ " yaml.dump(config.dict(), file)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[Profile(id='profile_studio_coder1', groups=['group-a', 'group-b'], definition=ProfileDefinition(display_name='Code Server Small', description=None, slug='ellip_studio_coder_slug_s', default=False, kubespawner_override=KubespawnerOverride(cpu_limit=2, cpu_guarantee=None, mem_limit='8G', mem_guarantee=None, image='eoepca/pde-code-server:develop', extra_resource_limits={}, extra_resource_guarantees={})), config_maps=[ConfigMap(name='bash-rc', key='bash-rc', mount_path='/workspace/.bashrc', default_mode=None, readonly=True, content='alias ll=\"ls -l\"\\nalias calrissian=\"/opt/conda/bin/calrissian --pod-nodeselectors /etc/calrissian/pod-node-selector.yml --stdout /calrissian/results.json --max-ram 16G --max-cores \"8\" --tmp-outdir-prefix /calrissian/tmp/ --outdir /calrissian/\"\\nalias cwltool=\"/opt/conda/bin/cwltool --podman\"\\n. /home/jovyan/.bashrc\\n# >>> conda initialize >>>\\n# !! Contents within this block are managed by \\'conda init\\' !!\\n__conda_setup=\"$(\\'/opt/conda/bin/conda\\' \\'shell.bash\\' \\'hook\\' 2> /dev/null)\"\\nif [ $? -eq 0 ]; then\\n eval \"$__conda_setup\"\\nelse\\n if [ -f \"/opt/conda/etc/profile.d/conda.sh\" ]; then\\n . \"/opt/conda/etc/profile.d/conda.sh\"\\n else\\n export PATH=\"/srv/conda/bin:$PATH\"\\n fi\\nfi\\nunset __conda_setup\\n\\nif [ -f \"/opt/conda/etc/profile.d/mamba.sh\" ]; then\\n . \"/opt/conda/etc/profile.d/mamba.sh\"\\nfi\\n# <<< conda initialize <<<\\n', persist=True)], volumes=[Volume(name='calrissian-volume', claim_name='calrissian-claim', size='50Gi', storage_class='standard', access_modes=['ReadWriteMany'], volume_mount=VolumeMount(name='calrissian-volume', mount_path='/calrissian'), persist=False), Volume(name='workspace-volume', claim_name='workspace-claim', size='50Gi', storage_class='standard', access_modes=['ReadWriteOnce'], volume_mount=VolumeMount(name='workspace-volume', mount_path='/workspace'), persist=True)], pod_env_vars={'HOME': '/workspace', 'CONDA_ENVS_PATH': ' /workspace/.envs'}, default_url=None, node_selector={}, role_bindings=None, image_pull_secrets=[], init_containers=[], manifests=None),\n",
+ " Profile(id='profile_studio_coder2', groups=['group-a', 'group-b'], definition=ProfileDefinition(display_name='Code Server Medium', description=None, slug='ellip_studio_coder_slug_m', default=False, kubespawner_override=KubespawnerOverride(cpu_limit=4, cpu_guarantee=None, mem_limit='12G', mem_guarantee=None, image='eoepca/pde-code-server:develop', extra_resource_limits={}, extra_resource_guarantees={})), config_maps=[ConfigMap(name='bash-rc', key='bash-rc', mount_path='/workspace/.bashrc', default_mode=None, readonly=True, content='alias ll=\"ls -l\"\\nalias calrissian=\"/opt/conda/bin/calrissian --pod-nodeselectors /etc/calrissian/pod-node-selector.yml --stdout /calrissian/results.json --max-ram 16G --max-cores \"8\" --tmp-outdir-prefix /calrissian/tmp/ --outdir /calrissian/\"\\nalias cwltool=\"/opt/conda/bin/cwltool --podman\"\\n. /home/jovyan/.bashrc\\n# >>> conda initialize >>>\\n# !! Contents within this block are managed by \\'conda init\\' !!\\n__conda_setup=\"$(\\'/opt/conda/bin/conda\\' \\'shell.bash\\' \\'hook\\' 2> /dev/null)\"\\nif [ $? -eq 0 ]; then\\n eval \"$__conda_setup\"\\nelse\\n if [ -f \"/opt/conda/etc/profile.d/conda.sh\" ]; then\\n . \"/opt/conda/etc/profile.d/conda.sh\"\\n else\\n export PATH=\"/srv/conda/bin:$PATH\"\\n fi\\nfi\\nunset __conda_setup\\n\\nif [ -f \"/opt/conda/etc/profile.d/mamba.sh\" ]; then\\n . \"/opt/conda/etc/profile.d/mamba.sh\"\\nfi\\n# <<< conda initialize <<<\\n', persist=True)], volumes=[Volume(name='calrissian-volume', claim_name='calrissian-claim', size='50Gi', storage_class='standard', access_modes=['ReadWriteMany'], volume_mount=VolumeMount(name='calrissian-volume', mount_path='/calrissian'), persist=False), Volume(name='workspace-volume', claim_name='workspace-claim', size='50Gi', storage_class='standard', access_modes=['ReadWriteOnce'], volume_mount=VolumeMount(name='workspace-volume', mount_path='/workspace'), persist=True)], pod_env_vars={'HOME': '/workspace', 'CONDA_ENVS_PATH': ' /workspace/.envs'}, default_url=None, node_selector={}, role_bindings=None, image_pull_secrets=[], init_containers=[], manifests=None),\n",
+ " Profile(id='profile_demo_init_script', groups=['group-a', 'group-b'], definition=ProfileDefinition(display_name='Coder demo init script', description='This profile is used to demonstrate the use of an init script', slug='eoepca_demo_init_script', default=False, kubespawner_override=KubespawnerOverride(cpu_limit=2, cpu_guarantee=1, mem_limit='6G', mem_guarantee='4G', image='eoepca/pde-code-server:develop', extra_resource_limits={}, extra_resource_guarantees={})), config_maps=[ConfigMap(name='init', key='init', mount_path='/opt/init/.init.sh', default_mode='0660', readonly=True, content=\"set -x\\n\\ncd /workspace\\n\\ngit clone 'https://github.com/eoap/mastering-app-package.git'\\n\\ncode-server --install-extension ms-python.python\\ncode-server --install-extension redhat.vscode-yaml\\ncode-server --install-extension sbg-rabix.benten-cwl\\ncode-server --install-extension ms-toolsai.jupyter\\n\\nln -s /workspace/.local/share/code-server/extensions /workspace/extensions\\n\\nexit 0\\n\", persist=False)], volumes=[Volume(name='calrissian-volume', claim_name='calrissian-claim', size='50Gi', storage_class='standard', access_modes=['ReadWriteMany'], volume_mount=VolumeMount(name='calrissian-volume', mount_path='/calrissian'), persist=False), Volume(name='workspace-volume', claim_name='workspace-claim', size='50Gi', storage_class='standard', access_modes=['ReadWriteOnce'], volume_mount=VolumeMount(name='workspace-volume', mount_path='/workspace'), persist=True)], pod_env_vars={'HOME': '/workspace', 'CONDA_ENVS_PATH': '/workspace/.envs', 'CONDARC': '/workspace/.condarc', 'XDG_RUNTIME_DIR': '/workspace/.local', 'CODE_SERVER_WS': '/workspace/mastering-app-package'}, default_url=None, node_selector={}, role_bindings=None, image_pull_secrets=[], init_containers=[InitContainer(name='init-file-on-volume', image='eoepca/pde-code-server:develop', command=['sh', '-c', 'sh /opt/init/.init.sh'], volume_mounts=[VolumeMount(name='workspace-volume', mount_path='/workspace'), InitContainerVolumeMount(name='init', mount_path='/opt/init/.init.sh', sub_path='init')])], manifests=None),\n",
+ " Profile(id='profile_jupyter_lab', groups=['group-c'], definition=ProfileDefinition(display_name='Jupyter Lab', description='Jupyter Lab with Python 3.11', slug='eoepca_jupyter_lab', default=False, kubespawner_override=KubespawnerOverride(cpu_limit=2, cpu_guarantee=1, mem_limit='6G', mem_guarantee='4G', image='jupyter/scipy-notebook', extra_resource_limits={}, extra_resource_guarantees={})), config_maps=[], volumes=[Volume(name='workspace-volume', claim_name='workspace-claim', size='50Gi', storage_class='standard', access_modes=['ReadWriteOnce'], volume_mount=VolumeMount(name='workspace-volume', mount_path='/workspace'), persist=True)], pod_env_vars={'HOME': '/workspace', 'XDG_RUNTIME_DIR': '/workspace/.local', 'XDG_CONFIG_HOME': '/workspace/.config'}, default_url=None, node_selector={}, role_bindings=None, image_pull_secrets=[], init_containers=[], manifests=None),\n",
+ " Profile(id='profile_jupyter_lab_2', groups=['group-c'], definition=ProfileDefinition(display_name='Jupyter Lab - profile 2', description='Jupyter Lab with Python 3.11 - demoes the use of an image pull secret', slug='eoepca_jupyter_lab_2', default=False, kubespawner_override=KubespawnerOverride(cpu_limit=2, cpu_guarantee=1, mem_limit='6G', mem_guarantee='4G', image='jupyter/scipy-notebook', extra_resource_limits={}, extra_resource_guarantees={})), config_maps=[], volumes=[Volume(name='workspace-volume', claim_name='workspace-claim', size='50Gi', storage_class='standard', access_modes=['ReadWriteOnce'], volume_mount=VolumeMount(name='workspace-volume', mount_path='/workspace'), persist=True)], pod_env_vars={'HOME': '/workspace', 'XDG_RUNTIME_DIR': '/workspace/.local', 'XDG_CONFIG_HOME': '/workspace/.config'}, default_url=None, node_selector={}, role_bindings=None, image_pull_secrets=[ImagePullSecret(name='cr-config', persist=False, data='')], init_containers=[], manifests=None)]"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "profiles"
+ ]
+  }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "base",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.8"
+ },
+ "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/config-generator/config-maps/bash-login b/config-generator/config-maps/bash-login
new file mode 100644
index 0000000..743cb42
--- /dev/null
+++ b/config-generator/config-maps/bash-login
@@ -0,0 +1 @@
+source /workspace/.bashrc
diff --git a/config-generator/config-maps/bash-rc b/config-generator/config-maps/bash-rc
new file mode 100644
index 0000000..6623894
--- /dev/null
+++ b/config-generator/config-maps/bash-rc
@@ -0,0 +1,22 @@
+alias ll="ls -l"
+alias calrissian="/opt/conda/bin/calrissian --pod-nodeselectors /etc/calrissian/pod-node-selector.yml --stdout /calrissian/results.json --max-ram 16G --max-cores "8" --tmp-outdir-prefix /calrissian/tmp/ --outdir /calrissian/"
+alias cwltool="/opt/conda/bin/cwltool --podman"
+. /home/jovyan/.bashrc
+# >>> conda initialize >>>
+# !! Contents within this block are managed by 'conda init' !!
+__conda_setup="$('/opt/conda/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
+if [ $? -eq 0 ]; then
+ eval "$__conda_setup"
+else
+ if [ -f "/opt/conda/etc/profile.d/conda.sh" ]; then
+ . "/opt/conda/etc/profile.d/conda.sh"
+ else
+ export PATH="/srv/conda/bin:$PATH"
+ fi
+fi
+unset __conda_setup
+
+if [ -f "/opt/conda/etc/profile.d/mamba.sh" ]; then
+ . "/opt/conda/etc/profile.d/mamba.sh"
+fi
+# <<< conda initialize <<<
diff --git a/config-generator/config-maps/conda-rc.yml b/config-generator/config-maps/conda-rc.yml
new file mode 100644
index 0000000..f8a34eb
--- /dev/null
+++ b/config-generator/config-maps/conda-rc.yml
@@ -0,0 +1,5 @@
+auto_update_conda: false
+show_channel_urls: true
+channels:
+ - conda-forge
+ - terradue
diff --git a/config-generator/config-maps/init.sh b/config-generator/config-maps/init.sh
new file mode 100644
index 0000000..014fdf3
--- /dev/null
+++ b/config-generator/config-maps/init.sh
@@ -0,0 +1,14 @@
+set -x
+
+cd /workspace
+
+git clone 'https://github.com/eoap/mastering-app-package.git'
+
+code-server --install-extension ms-python.python
+code-server --install-extension redhat.vscode-yaml
+code-server --install-extension sbg-rabix.benten-cwl
+code-server --install-extension ms-toolsai.jupyter
+
+ln -s /workspace/.local/share/code-server/extensions /workspace/extensions
+
+exit 0
diff --git a/config-generator/models.py b/config-generator/models.py
new file mode 100644
index 0000000..aee37fa
--- /dev/null
+++ b/config-generator/models.py
@@ -0,0 +1,148 @@
+from enum import Enum
+from typing import Dict, List, Optional, Union
+
+from pydantic import BaseModel
+
+
+class ConfigMapKeyRef(BaseModel):
+ name: str
+ key: str
+
+
+class ConfigMapEnvVarReference(BaseModel):
+ from_config_map: ConfigMapKeyRef
+
+
+class SubjectKind(str, Enum):
+ service_account = "ServiceAccount"
+ user = "User"
+
+
+class Verb(str, Enum):
+ get = "get"
+ list = "list"
+ watch = "watch"
+ create = "create"
+ update = "update"
+ patch = "patch"
+ delete = "delete"
+ deletecollection = "deletecollection"
+
+
+class Subject(BaseModel):
+ name: str
+ kind: SubjectKind
+
+
+class Role(BaseModel):
+ name: str
+ resources: List[str]
+ verbs: List[Verb]
+ api_groups: Optional[List[str]] = [""]
+
+
+class RoleBinding(BaseModel):
+ name: str
+ subjects: List[Subject]
+ role: Role
+ persist: bool = True
+
+
+class VolumeMount(BaseModel):
+ """volume mount object"""
+
+ name: str
+ mount_path: str
+
+
+class InitContainerVolumeMount(VolumeMount):
+ sub_path: str
+
+
+class Volume(BaseModel):
+ """volume object"""
+
+ name: str
+ claim_name: str
+ size: str
+ storage_class: str
+ access_modes: List[str]
+ volume_mount: VolumeMount
+ persist: bool
+
+
+class Manifest(BaseModel):
+ name: str
+ key: str
+ content: Optional[str] = None
+ persist: Optional[bool] = True
+
+
+class ConfigMap(BaseModel):
+ """config map object"""
+
+ name: str
+ key: str
+ mount_path: Optional[str] = None
+ default_mode: Optional[str] = None
+ readonly: bool
+ content: Optional[str] = None
+ persist: Optional[bool] = True
+
+
+class KubespawnerOverride(BaseModel):
+ """kubespawner override object"""
+
+ cpu_limit: int
+ cpu_guarantee: Optional[int] = None
+ mem_limit: str
+ mem_guarantee: Optional[str] = None
+ image: str
+ extra_resource_limits: Optional[dict] = {}
+ extra_resource_guarantees: Optional[dict] = {}
+
+
+class InitContainer(BaseModel):
+ name: str
+ image: str
+ command: List[str]
+ volume_mounts: list[VolumeMount | InitContainerVolumeMount]
+
+
+class ProfileDefinition(BaseModel):
+ """profile definition object"""
+
+ display_name: str
+ description: Optional[str] = None
+ slug: str
+ default: bool
+ kubespawner_override: KubespawnerOverride
+
+
+class ImagePullSecret(BaseModel):
+ name: str
+ persist: bool = True
+ data: Optional[str] = None
+
+
+class Profile(BaseModel):
+ """profile object"""
+
+ id: str
+ groups: List[str]
+ definition: ProfileDefinition
+ config_maps: Optional[List[ConfigMap]] = None
+ volumes: Optional[List[Volume]] = None
+ pod_env_vars: Optional[Dict[str, Union[str, ConfigMapEnvVarReference]]] = None
+ default_url: Optional[str] = None
+ node_selector: dict
+ role_bindings: Optional[List[RoleBinding]] = None
+ image_pull_secrets: Optional[List[ImagePullSecret]] = []
+ init_containers: Optional[List[InitContainer]] = []
+ manifests: Optional[List[Manifest]] = None
+
+
+class Config(BaseModel):
+ """config object"""
+
+ profiles: List[Profile]
diff --git a/jupyterhub/.helmignore b/jupyterhub/.helmignore
new file mode 100644
index 0000000..05f3c98
--- /dev/null
+++ b/jupyterhub/.helmignore
@@ -0,0 +1,31 @@
+# Anything within the root folder of the Helm chart, where Chart.yaml resides,
+# will be embedded into the packaged Helm chart. This is reasonable since only
+# when the templates render after the chart has been packaged and distributed,
+# will the templates logic evaluate that determines if other files were
+# referenced, such as our files/hub/jupyterhub_config.py.
+#
+# Here are files that we intentionally ignore to avoid them being packaged,
+# because we don't want to reference them from our templates anyhow.
+schema.yaml
+
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/jupyterhub/Chart.yaml b/jupyterhub/Chart.yaml
new file mode 100644
index 0000000..a6e5198
--- /dev/null
+++ b/jupyterhub/Chart.yaml
@@ -0,0 +1,41 @@
+annotations:
+ artifacthub.io/images: |
+ - image: jupyterhub/configurable-http-proxy:4.5.3
+ name: configurable-http-proxy
+ - image: jupyterhub/k8s-hub:2.0.0
+ name: k8s-hub
+ - image: jupyterhub/k8s-image-awaiter:2.0.0
+ name: k8s-image-awaiter
+ - image: jupyterhub/k8s-network-tools:2.0.0
+ name: k8s-network-tools
+ - image: jupyterhub/k8s-secret-sync:2.0.0
+ name: k8s-secret-sync
+ - image: jupyterhub/k8s-singleuser-sample:2.0.0
+ name: k8s-singleuser-sample
+ - image: k8s.gcr.io/kube-scheduler:v1.23.10
+ name: kube-scheduler
+ - image: k8s.gcr.io/pause:3.8
+ name: pause
+ - image: k8s.gcr.io/pause:3.8
+ name: pause
+ - image: traefik:v2.8.4
+ name: traefik
+apiVersion: v2
+appVersion: 3.0.0
+description: Multi-user Jupyter installation
+home: https://z2jh.jupyter.org
+icon: https://jupyterhub.github.io/helm-chart/images/hublogo.svg
+keywords:
+- jupyter
+- jupyterhub
+- z2jh
+kubeVersion: '>=1.20.0-0'
+maintainers:
+- email: erik@sundellopensource.se
+ name: Erik Sundell
+- name: Simon Li
+ url: https://github.com/manics/
+name: jupyterhub
+sources:
+- https://github.com/jupyterhub/zero-to-jupyterhub-k8s
+version: 2.0.0
diff --git a/jupyterhub/README.md b/jupyterhub/README.md
new file mode 100644
index 0000000..bb85ebb
--- /dev/null
+++ b/jupyterhub/README.md
@@ -0,0 +1,18 @@
+# JupyterHub Helm chart
+
+[![Documentation](https://img.shields.io/badge/Documentation-z2jh.jupyter.org-blue?logo=read-the-docs&logoColor=white)](https://z2jh.jupyter.org)
+[![GitHub](https://img.shields.io/badge/Source_code-github-blue?logo=github&logoColor=white)](https://github.com/jupyterhub/zero-to-jupyterhub-k8s)
+[![Discourse](https://img.shields.io/badge/Help_forum-discourse-blue?logo=discourse&logoColor=white)](https://discourse.jupyter.org/c/jupyterhub/z2jh-k8s)
+[![Gitter](https://img.shields.io/badge/Social_chat-gitter-blue?logo=gitter&logoColor=white)](https://gitter.im/jupyterhub/jupyterhub)
+
+[![Latest stable release of the Helm chart](https://img.shields.io/badge/dynamic/json.svg?label=Latest%20stable%20release&url=https://jupyterhub.github.io/helm-chart/info.json&query=$.jupyterhub.stable&logo=helm&logoColor=white)](https://jupyterhub.github.io/helm-chart#jupyterhub)
+[![Latest pre-release of the Helm chart](https://img.shields.io/badge/dynamic/json.svg?label=Latest%20pre-release&url=https://jupyterhub.github.io/helm-chart/info.json&query=$.jupyterhub.pre&logo=helm&logoColor=white)](https://jupyterhub.github.io/helm-chart#development-releases-jupyterhub)
+[![Latest development release of the Helm chart](https://img.shields.io/badge/dynamic/json.svg?label=Latest%20dev%20release&url=https://jupyterhub.github.io/helm-chart/info.json&query=$.jupyterhub.latest&logo=helm&logoColor=white)](https://jupyterhub.github.io/helm-chart#development-releases-jupyterhub)
+
+The JupyterHub Helm chart is accompanied with an installation guide at [z2jh.jupyter.org](https://z2jh.jupyter.org). Together they enable you to deploy [JupyterHub](https://jupyterhub.readthedocs.io) in a Kubernetes cluster that can make Jupyter environments available to several thousands of simultaneous users.
+
+## History
+
+Much of the initial groundwork for this documentation is information learned from the successful use of JupyterHub and Kubernetes at UC Berkeley in their [Data 8](http://data8.org/) program.
+
+![](https://raw.githubusercontent.com/jupyterhub/zero-to-jupyterhub-k8s/HEAD/docs/source/_static/images/data8_massive_audience.jpg)
diff --git a/jupyterhub/files/hub/config.yml b/jupyterhub/files/hub/config.yml
new file mode 100644
index 0000000..f2f679b
--- /dev/null
+++ b/jupyterhub/files/hub/config.yml
@@ -0,0 +1,303 @@
+profiles:
+- config_maps:
+ - content: "alias ll=\"ls -l\"\nalias calrissian=\"/opt/conda/bin/calrissian --pod-nodeselectors\
+ \ /etc/calrissian/pod-node-selector.yml --stdout /calrissian/results.json --max-ram\
+ \ 16G --max-cores \"8\" --tmp-outdir-prefix /calrissian/tmp/ --outdir /calrissian/\"\
+ \nalias cwltool=\"/opt/conda/bin/cwltool --podman\"\n. /home/jovyan/.bashrc\n\
+ # >>> conda initialize >>>\n# !! Contents within this block are managed by 'conda\
+ \ init' !!\n__conda_setup=\"$('/opt/conda/bin/conda' 'shell.bash' 'hook' 2>\
+ \ /dev/null)\"\nif [ $? -eq 0 ]; then\n eval \"$__conda_setup\"\nelse\n \
+ \ if [ -f \"/opt/conda/etc/profile.d/conda.sh\" ]; then\n . \"/opt/conda/etc/profile.d/conda.sh\"\
+ \n else\n export PATH=\"/srv/conda/bin:$PATH\"\n fi\nfi\nunset\
+ \ __conda_setup\n\nif [ -f \"/opt/conda/etc/profile.d/mamba.sh\" ]; then\n \
+ \ . \"/opt/conda/etc/profile.d/mamba.sh\"\nfi\n# <<< conda initialize <<<\n"
+ default_mode: null
+ key: bash-rc
+ mount_path: /workspace/.bashrc
+ name: bash-rc
+ persist: true
+ readonly: true
+ default_url: null
+ definition:
+ default: false
+ description: null
+ display_name: Code Server Small
+ kubespawner_override:
+ cpu_guarantee: null
+ cpu_limit: 2
+ extra_resource_guarantees: {}
+ extra_resource_limits: {}
+ image: eoepca/pde-code-server:develop
+ mem_guarantee: null
+ mem_limit: 8G
+ slug: ellip_studio_coder_slug_s
+ groups:
+ - group-a
+ - group-b
+ id: profile_studio_coder1
+ image_pull_secrets: []
+ init_containers: []
+ manifests: null
+ node_selector: {}
+ pod_env_vars:
+    CONDA_ENVS_PATH: /workspace/.envs
+ HOME: /workspace
+ role_bindings: null
+ volumes:
+ - access_modes:
+ - ReadWriteMany
+ claim_name: calrissian-claim
+ name: calrissian-volume
+ persist: false
+ size: 50Gi
+ storage_class: standard
+ volume_mount:
+ mount_path: /calrissian
+ name: calrissian-volume
+ - access_modes:
+ - ReadWriteOnce
+ claim_name: workspace-claim
+ name: workspace-volume
+ persist: true
+ size: 50Gi
+ storage_class: standard
+ volume_mount:
+ mount_path: /workspace
+ name: workspace-volume
+- config_maps:
+ - content: "alias ll=\"ls -l\"\nalias calrissian=\"/opt/conda/bin/calrissian --pod-nodeselectors\
+ \ /etc/calrissian/pod-node-selector.yml --stdout /calrissian/results.json --max-ram\
+ \ 16G --max-cores \"8\" --tmp-outdir-prefix /calrissian/tmp/ --outdir /calrissian/\"\
+ \nalias cwltool=\"/opt/conda/bin/cwltool --podman\"\n. /home/jovyan/.bashrc\n\
+ # >>> conda initialize >>>\n# !! Contents within this block are managed by 'conda\
+ \ init' !!\n__conda_setup=\"$('/opt/conda/bin/conda' 'shell.bash' 'hook' 2>\
+ \ /dev/null)\"\nif [ $? -eq 0 ]; then\n eval \"$__conda_setup\"\nelse\n \
+ \ if [ -f \"/opt/conda/etc/profile.d/conda.sh\" ]; then\n . \"/opt/conda/etc/profile.d/conda.sh\"\
+ \n else\n export PATH=\"/srv/conda/bin:$PATH\"\n fi\nfi\nunset\
+ \ __conda_setup\n\nif [ -f \"/opt/conda/etc/profile.d/mamba.sh\" ]; then\n \
+ \ . \"/opt/conda/etc/profile.d/mamba.sh\"\nfi\n# <<< conda initialize <<<\n"
+ default_mode: null
+ key: bash-rc
+ mount_path: /workspace/.bashrc
+ name: bash-rc
+ persist: true
+ readonly: true
+ default_url: null
+ definition:
+ default: false
+ description: null
+ display_name: Code Server Medium
+ kubespawner_override:
+ cpu_guarantee: null
+ cpu_limit: 4
+ extra_resource_guarantees: {}
+ extra_resource_limits: {}
+ image: eoepca/pde-code-server:develop
+ mem_guarantee: null
+ mem_limit: 12G
+ slug: ellip_studio_coder_slug_m
+ groups:
+ - group-a
+ - group-b
+ id: profile_studio_coder2
+ image_pull_secrets: []
+ init_containers: []
+ manifests: null
+ node_selector: {}
+ pod_env_vars:
+    CONDA_ENVS_PATH: /workspace/.envs
+ HOME: /workspace
+ role_bindings: null
+ volumes:
+ - access_modes:
+ - ReadWriteMany
+ claim_name: calrissian-claim
+ name: calrissian-volume
+ persist: false
+ size: 50Gi
+ storage_class: standard
+ volume_mount:
+ mount_path: /calrissian
+ name: calrissian-volume
+ - access_modes:
+ - ReadWriteOnce
+ claim_name: workspace-claim
+ name: workspace-volume
+ persist: true
+ size: 50Gi
+ storage_class: standard
+ volume_mount:
+ mount_path: /workspace
+ name: workspace-volume
+- config_maps:
+ - content: 'set -x
+
+
+ cd /workspace
+
+
+ git clone ''https://github.com/eoap/mastering-app-package.git''
+
+
+ code-server --install-extension ms-python.python
+
+ code-server --install-extension redhat.vscode-yaml
+
+ code-server --install-extension sbg-rabix.benten-cwl
+
+ code-server --install-extension ms-toolsai.jupyter
+
+
+ ln -s /workspace/.local/share/code-server/extensions /workspace/extensions
+
+
+ exit 0
+
+ '
+ default_mode: '0660'
+ key: init
+ mount_path: /opt/init/.init.sh
+ name: init
+ persist: false
+ readonly: true
+ default_url: null
+ definition:
+ default: false
+ description: This profile is used to demonstrate the use of an init script
+ display_name: Coder demo init script
+ kubespawner_override:
+ cpu_guarantee: 1
+ cpu_limit: 2
+ extra_resource_guarantees: {}
+ extra_resource_limits: {}
+ image: eoepca/pde-code-server:develop
+ mem_guarantee: 4G
+ mem_limit: 6G
+ slug: eoepca_demo_init_script
+ groups:
+ - group-a
+ - group-b
+ id: profile_demo_init_script
+ image_pull_secrets: []
+ init_containers:
+ - command:
+ - sh
+ - -c
+ - sh /opt/init/.init.sh
+ image: eoepca/pde-code-server:develop
+ name: init-file-on-volume
+ volume_mounts:
+ - mount_path: /workspace
+ name: workspace-volume
+ - mount_path: /opt/init/.init.sh
+ name: init
+ sub_path: init
+ manifests: null
+ node_selector: {}
+ pod_env_vars:
+ CODE_SERVER_WS: /workspace/mastering-app-package
+ CONDARC: /workspace/.condarc
+ CONDA_ENVS_PATH: /workspace/.envs
+ HOME: /workspace
+ XDG_RUNTIME_DIR: /workspace/.local
+ role_bindings: null
+ volumes:
+ - access_modes:
+ - ReadWriteMany
+ claim_name: calrissian-claim
+ name: calrissian-volume
+ persist: false
+ size: 50Gi
+ storage_class: standard
+ volume_mount:
+ mount_path: /calrissian
+ name: calrissian-volume
+ - access_modes:
+ - ReadWriteOnce
+ claim_name: workspace-claim
+ name: workspace-volume
+ persist: true
+ size: 50Gi
+ storage_class: standard
+ volume_mount:
+ mount_path: /workspace
+ name: workspace-volume
+- config_maps: []
+ default_url: null
+ definition:
+ default: false
+ description: Jupyter Lab with Python 3.11
+ display_name: Jupyter Lab
+ kubespawner_override:
+ cpu_guarantee: 1
+ cpu_limit: 2
+ extra_resource_guarantees: {}
+ extra_resource_limits: {}
+ image: jupyter/scipy-notebook
+ mem_guarantee: 4G
+ mem_limit: 6G
+ slug: eoepca_jupyter_lab
+ groups:
+ - group-c
+ id: profile_jupyter_lab
+ image_pull_secrets: []
+ init_containers: []
+ manifests: null
+ node_selector: {}
+ pod_env_vars:
+ HOME: /workspace
+ XDG_CONFIG_HOME: /workspace/.config
+ XDG_RUNTIME_DIR: /workspace/.local
+ role_bindings: null
+ volumes:
+ - access_modes:
+ - ReadWriteOnce
+ claim_name: workspace-claim
+ name: workspace-volume
+ persist: true
+ size: 50Gi
+ storage_class: standard
+ volume_mount:
+ mount_path: /workspace
+ name: workspace-volume
+- config_maps: []
+ default_url: null
+ definition:
+ default: false
+ description: Jupyter Lab with Python 3.11 - demoes the use of an image pull secret
+ display_name: Jupyter Lab - profile 2
+ kubespawner_override:
+ cpu_guarantee: 1
+ cpu_limit: 2
+ extra_resource_guarantees: {}
+ extra_resource_limits: {}
+ image: jupyter/scipy-notebook
+ mem_guarantee: 4G
+ mem_limit: 6G
+ slug: eoepca_jupyter_lab_2
+ groups:
+ - group-c
+ id: profile_jupyter_lab_2
+ image_pull_secrets:
+ - data: ''
+ name: cr-config
+ persist: false
+ init_containers: []
+ manifests: null
+ node_selector: {}
+ pod_env_vars:
+ HOME: /workspace
+ XDG_CONFIG_HOME: /workspace/.config
+ XDG_RUNTIME_DIR: /workspace/.local
+ role_bindings: null
+ volumes:
+ - access_modes:
+ - ReadWriteOnce
+ claim_name: workspace-claim
+ name: workspace-volume
+ persist: true
+ size: 50Gi
+ storage_class: standard
+ volume_mount:
+ mount_path: /workspace
+ name: workspace-volume
diff --git a/jupyterhub/files/hub/jupyterhub_config.py b/jupyterhub/files/hub/jupyterhub_config.py
new file mode 100644
index 0000000..5e6fb9e
--- /dev/null
+++ b/jupyterhub/files/hub/jupyterhub_config.py
@@ -0,0 +1,179 @@
+import os
+import sys
+
+from tornado.httpclient import AsyncHTTPClient
+
+
+from application_hub_context.app_hub_context import DefaultApplicationHubContext
+
+
+configuration_directory = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, configuration_directory)
+
+from z2jh import (
+ get_config,
+ get_name,
+ get_name_env,
+)
+
+config_path = "/usr/local/etc/jupyterhub/config.yml"
+
+namespace_prefix = "jupyter"
+
+
+def custom_options_form(spawner):
+
+ spawner.log.info("Configure profile list")
+
+ namespace = f"{namespace_prefix}-{spawner.user.name}"
+
+ workspace = DefaultApplicationHubContext(
+ namespace=namespace,
+ spawner=spawner,
+ config_path=config_path,
+ )
+
+ spawner.profile_list = workspace.get_profile_list()
+
+ return spawner._options_form_default()
+
+
+def pre_spawn_hook(spawner):
+
+ profile_slug = spawner.user_options.get("profile", None)
+
+ env = os.environ["JUPYTERHUB_ENV"].lower()
+
+ spawner.environment["CALRISSIAN_POD_NAME"] = f"jupyter-{spawner.user.name}-{env}"
+
+ spawner.log.info(f"Using profile slug {profile_slug}")
+
+ namespace = f"{namespace_prefix}-{spawner.user.name}"
+
+ workspace = DefaultApplicationHubContext(
+ namespace=namespace, spawner=spawner, config_path=config_path
+ )
+
+ workspace.initialise()
+
+ profile_id = workspace.config_parser.get_profile_by_slug(slug=profile_slug).id
+
+ default_url = workspace.config_parser.get_profile_default_url(profile_id=profile_id)
+
+ if default_url:
+ spawner.log.info(f"Setting default url to {default_url}")
+ spawner.default_url = default_url
+
+
+def post_stop_hook(spawner):
+
+ namespace = f"jupyter-{spawner.user.name}"
+
+ workspace = DefaultApplicationHubContext(
+ namespace=namespace, spawner=spawner, config_path=config_path
+ )
+ spawner.log.info("Dispose in post stop hook")
+ workspace.dispose()
+
+
+c.JupyterHub.default_url = "spawn"
+
+
+# Configure JupyterHub to use the curl backend for making HTTP requests,
+# rather than the pure-python implementations. The default one starts
+# being too slow to make a large number of requests to the proxy API
+# at the rate required.
+AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
+
+c.ConfigurableHTTPProxy.api_url = (
+ f'http://{get_name("proxy-api")}:{get_name_env("proxy-api", "_SERVICE_PORT")}'
+)
+# c.ConfigurableHTTPProxy.should_start = False
+
+# Don't wait at all before redirecting a spawning user to the progress page
+c.JupyterHub.tornado_settings = {
+ "slow_spawn_timeout": 0,
+}
+
+jupyterhub_env = os.environ["JUPYTERHUB_ENV"].upper()
+jupyterhub_hub_host = "hub.jupyter"
+jupyterhub_single_user_image = os.environ["JUPYTERHUB_SINGLE_USER_IMAGE_NOTEBOOKS"]
+
+# Authentication
+c.LocalAuthenticator.create_system_users = True
+c.Authenticator.admin_users = {"jovyan"}
+# Deprecated
+c.Authenticator.allowed_users = {"jovyan"}
+c.JupyterHub.authenticator_class = "dummy"
+
+# HTTP Proxy auth token
+c.ConfigurableHTTPProxy.auth_token = get_config("proxy.secretToken")
+c.JupyterHub.cookie_secret_file = "/srv/jupyterhub/cookie_secret"
+# Proxy config
+c.JupyterHub.cleanup_servers = False
+# Network
+c.JupyterHub.allow_named_servers = True
+c.JupyterHub.ip = "0.0.0.0"
+c.JupyterHub.hub_ip = "0.0.0.0"
+c.JupyterHub.hub_connect_ip = jupyterhub_hub_host
+
+# Culling
+c.JupyterHub.services = [
+ {
+ "name": "idle-culler",
+ "admin": True,
+ "command": [sys.executable, "-m", "jupyterhub_idle_culler", "--timeout=3600"],
+ }
+]
+
+# Logs
+c.JupyterHub.log_level = "DEBUG"
+
+# Spawner
+c.JupyterHub.spawner_class = "kubespawner.KubeSpawner"
+c.KubeSpawner.environment = {
+ "JUPYTER_ENABLE_LAB": "true",
+}
+
+c.KubeSpawner.uid = 1001
+c.KubeSpawner.fs_gid = 100
+c.KubeSpawner.hub_connect_ip = jupyterhub_hub_host
+
+# SecurityContext
+c.KubeSpawner.privileged = True
+c.KubeSpawner.allow_privilege_escalation = True
+
+# ServiceAccount
+c.KubeSpawner.service_account = "default"
+c.KubeSpawner.start_timeout = 60 * 15
+c.KubeSpawner.image = jupyterhub_single_user_image
+c.KubeSpawner.k8s_api_verify_ssl = True
+c.KubeSpawner.pod_name_template = (
+ "jupyter-{username}-{servername}-" + os.environ["JUPYTERHUB_ENV"].lower()
+)
+
+# Namespace
+c.KubeSpawner.namespace = "jupyter"
+
+# User namespace
+c.KubeSpawner.enable_user_namespaces = True
+
+# Volumes
+# volumes are managed by the pre_spawn_hook/post_stop_hook
+
+# TODO - move this value to the values.yaml file
+c.KubeSpawner.image_pull_secrets = ["cr-config"]
+
+# custom options form
+c.KubeSpawner.options_form = custom_options_form
+
+# hooks
+c.KubeSpawner.pre_spawn_hook = pre_spawn_hook
+c.KubeSpawner.post_stop_hook = post_stop_hook
+
+c.JupyterHub.template_paths = [
+ "/opt/jupyterhub/template",
+ "/usr/local/share/jupyterhub/templates",
+]
diff --git a/jupyterhub/files/hub/z2jh.py b/jupyterhub/files/hub/z2jh.py
new file mode 100644
index 0000000..57e463f
--- /dev/null
+++ b/jupyterhub/files/hub/z2jh.py
@@ -0,0 +1,121 @@
+"""
+Utility methods for use in jupyterhub_config.py and dynamic subconfigs.
+
+Methods here can be imported by extraConfig in values.yaml
+"""
+from collections.abc import Mapping
+from functools import lru_cache
+import os
+
+import yaml
+
+# memoize so we only load config once
+@lru_cache()
+def _load_config():
+ """Load the Helm chart configuration used to render the Helm templates of
+ the chart from a mounted k8s Secret, and merge in values from an optionally
+ mounted secret (hub.existingSecret)."""
+
+ cfg = {}
+ for source in ("secret/values.yaml", "existing-secret/values.yaml"):
+ path = f"/usr/local/etc/jupyterhub/{source}"
+ if os.path.exists(path):
+ print(f"Loading {path}")
+ with open(path) as f:
+ values = yaml.safe_load(f)
+ cfg = _merge_dictionaries(cfg, values)
+ else:
+ print(f"No config at {path}")
+ return cfg
+
+
+@lru_cache()
+def _get_config_value(key):
+ """Load value from the k8s ConfigMap given a key."""
+
+ path = f"/usr/local/etc/jupyterhub/config/{key}"
+ if os.path.exists(path):
+ with open(path) as f:
+ return f.read()
+ else:
+ raise Exception(f"{path} not found!")
+
+
+@lru_cache()
+def get_secret_value(key, default="never-explicitly-set"):
+ """Load value from the user managed k8s Secret or the default k8s Secret
+ given a key."""
+
+ for source in ("existing-secret", "secret"):
+ path = f"/usr/local/etc/jupyterhub/{source}/{key}"
+ if os.path.exists(path):
+ with open(path) as f:
+ return f.read()
+ if default != "never-explicitly-set":
+ return default
+ raise Exception(f"{key} not found in either k8s Secret!")
+
+
+def get_name(name):
+ """Returns the fullname of a resource given its short name"""
+ return _get_config_value(name)
+
+
+def get_name_env(name, suffix=""):
+ """Returns the fullname of a resource given its short name along with a
+ suffix, converted to uppercase with dashes replaced with underscores. This
+ is useful to reference named services associated environment variables, such
+ as PROXY_PUBLIC_SERVICE_PORT."""
+ env_key = _get_config_value(name) + suffix
+ env_key = env_key.upper().replace("-", "_")
+ return os.environ[env_key]
+
+
+def _merge_dictionaries(a, b):
+ """Merge two dictionaries recursively.
+
+ Simplified From https://stackoverflow.com/a/7205107
+ """
+ merged = a.copy()
+ for key in b:
+ if key in a:
+ if isinstance(a[key], Mapping) and isinstance(b[key], Mapping):
+ merged[key] = _merge_dictionaries(a[key], b[key])
+ else:
+ merged[key] = b[key]
+ else:
+ merged[key] = b[key]
+ return merged
+
+
+def get_config(key, default=None):
+ """
+ Find a config item of a given name & return it
+
+ Parses everything as YAML, so lists and dicts are available too
+
+ get_config("a.b.c") returns config['a']['b']['c']
+ """
+ value = _load_config()
+ # resolve path in yaml
+ for level in key.split("."):
+ if not isinstance(value, dict):
+ # a parent is a scalar or null,
+ # can't resolve full path
+ return default
+ if level not in value:
+ return default
+ else:
+ value = value[level]
+ return value
+
+
+def set_config_if_not_none(cparent, name, key):
+ """
+ Find a config item of a given name, set the corresponding Jupyter
+ configuration item if not None
+ """
+ data = get_config(key)
+ if data is not None:
+ setattr(cparent, name, data)
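
As a usage sketch, these helpers are typically imported from jupyterhub_config.py or an extraConfig snippet; the dotted keys below are illustrative and assume matching entries in the mounted values.yaml (`c` is the config object available in jupyterhub_config.py):

    from z2jh import get_config, set_config_if_not_none

    # resolves config["singleuser"]["image"]["name"], or returns the default
    image = get_config("singleuser.image.name", default="jupyter/base-notebook")

    # sets the trait only when the chart value is not None
    set_config_if_not_none(c.KubeSpawner, "cpu_limit", "singleuser.cpu.limit")
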
diff --git a/jupyterhub/files/theme/page.html b/jupyterhub/files/theme/page.html
new file mode 100644
index 0000000..6ee147a
--- /dev/null
+++ b/jupyterhub/files/theme/page.html
@@ -0,0 +1,5 @@
+{% extends "templates/page.html" %}
+
+{% set announcement = 'EOEPCA+ ApplicationHub demonstration instance' %}
+
+{% block title %}ApplicationHub{% endblock %}
diff --git a/jupyterhub/files/theme/spawn.html b/jupyterhub/files/theme/spawn.html
new file mode 100644
index 0000000..639636b
--- /dev/null
+++ b/jupyterhub/files/theme/spawn.html
@@ -0,0 +1,7 @@
+{% extends "templates/spawn.html" %}
+
+{% block heading %}
+
+
+Available demonstration applications
+
+{% endblock %}
diff --git a/jupyterhub/files/theme/spawn_pending.html b/jupyterhub/files/theme/spawn_pending.html
new file mode 100644
index 0000000..f2b2e7c
--- /dev/null
+++ b/jupyterhub/files/theme/spawn_pending.html
@@ -0,0 +1,94 @@
+{% extends "page.html" %}
+
+{% block main %}
+
+
+
+
+ {% block message %}
+
+Your pod is starting up.
+
+You will be redirected automatically when it's ready for you.
+ {% endblock %}
+
+
+
+
+
+
+
+{% endblock %}
+
+{% block script %}
+{{ super() }}
+
+{% endblock %}
diff --git a/jupyterhub/requirements.txt b/jupyterhub/requirements.txt
new file mode 100644
index 0000000..e69de29
diff --git a/jupyterhub/templates/NOTES.txt b/jupyterhub/templates/NOTES.txt
new file mode 100644
index 0000000..e9a4edf
--- /dev/null
+++ b/jupyterhub/templates/NOTES.txt
@@ -0,0 +1,153 @@
+{{- $proxy_service := include "jupyterhub.proxy-public.fullname" . -}}
+
+{{- /* Generated with https://patorjk.com/software/taag/#p=display&h=0&f=Slant&t=JupyterHub */}}
+.      __                             __                     __  __            __
+      / / __  __     ____    __  __  / /_   ___     _____   / / / / __  __    / /_
+ __  / / / / / /    / __ \  / / / / / __/  / _ \   / ___/  / /_/ / / / / /   / __ \
+/ /_/ /  / /_/ /   / /_/ / / /_/ / / /_   /  __/  / /     / __  /  / /_/ /  / /_/ /
+\____/   \__,_/   / .___/  \__, /  \__/   \___/  /_/     /_/ /_/   \__,_/  /_.___/
+                 /_/      /____/
+
+ You have successfully installed the official JupyterHub Helm chart!
+
+### Installation info
+
+ - Kubernetes namespace: {{ .Release.Namespace }}
+ - Helm release name: {{ .Release.Name }}
+ - Helm chart version: {{ .Chart.Version }}
+ - JupyterHub version: {{ .Chart.AppVersion }}
+ - Hub pod packages: See https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/{{ include "jupyterhub.chart-version-to-git-ref" .Chart.Version }}/images/hub/requirements.txt
+
+### Followup links
+
+ - Documentation: https://z2jh.jupyter.org
+ - Help forum: https://discourse.jupyter.org
+ - Social chat: https://gitter.im/jupyterhub/jupyterhub
+ - Issue tracking: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues
+
+### Post-installation checklist
+
+ - Verify that created Pods enter a Running state:
+
+ kubectl --namespace={{ .Release.Namespace }} get pod
+
+ If a pod is stuck with a Pending or ContainerCreating status, diagnose with:
+
+ kubectl --namespace={{ .Release.Namespace }} describe pod
+
+ If a pod keeps restarting, diagnose with:
+
+ kubectl --namespace={{ .Release.Namespace }} logs --previous
+ {{- println }}
+
+ {{- if eq .Values.proxy.service.type "LoadBalancer" }}
+ - Verify an external IP is provided for the k8s Service {{ $proxy_service }}.
+
+ kubectl --namespace={{ .Release.Namespace }} get service {{ $proxy_service }}
+
+ If the external ip remains <pending>, diagnose with:
+
+ kubectl --namespace={{ .Release.Namespace }} describe service {{ $proxy_service }}
+ {{- end }}
+
+ - Verify web based access:
+ {{- println }}
+ {{- if .Values.ingress.enabled }}
+ {{- range $host := .Values.ingress.hosts }}
+ Try insecure HTTP access: http://{{ $host }}{{ $.Values.hub.baseUrl | trimSuffix "/" }}/
+ {{- end }}
+
+ {{- range $tls := .Values.ingress.tls }}
+ {{- range $host := $tls.hosts }}
+ Try secure HTTPS access: https://{{ $host }}{{ $.Values.hub.baseUrl | trimSuffix "/" }}/
+ {{- end }}
+ {{- end }}
+ {{- else }}
+ You have not configured a k8s Ingress resource so you need to access the k8s
+ Service {{ $proxy_service }} directly.
+ {{- println }}
+
+ {{- if eq .Values.proxy.service.type "NodePort" }}
+ The k8s Service {{ $proxy_service }} is exposed via NodePorts. That means
+ that all the k8s cluster's nodes are exposing the k8s Service via those
+ ports.
+
+ Try insecure HTTP access: http://<node-ip>:{{ .Values.proxy.service.nodePorts.http | default "no-http-nodeport-set"}}
+ Try secure HTTPS access: https://<node-ip>:{{ .Values.proxy.service.nodePorts.https | default "no-https-nodeport-set" }}
+
+ {{- else }}
+ If your computer is outside the k8s cluster, you can port-forward traffic to
+ the k8s Service {{ $proxy_service }} with kubectl to access it from your
+ computer.
+
+ kubectl --namespace={{ .Release.Namespace }} port-forward service/{{ $proxy_service }} 8080:http
+
+ Try insecure HTTP access: http://localhost:8080
+ {{- end }}
+ {{- end }}
+ {{- println }}
+
+
+
+
+
+{{- /*
+ Warnings for likely misconfiguration
+*/}}
+
+{{- if and (not .Values.scheduling.podPriority.enabled) (and .Values.scheduling.userPlaceholder.enabled .Values.scheduling.userPlaceholder.replicas) }}
+#################################################################################
+###### WARNING: You are using user placeholders without pod priority #####
+###### enabled*, either enable pod priority or stop using the #####
+###### user placeholders** to avoid having placeholders that #####
+###### refuse to make room for a real user. #####
+###### #####
+###### *scheduling.podPriority.enabled #####
+###### **scheduling.userPlaceholder.enabled #####
+###### **scheduling.userPlaceholder.replicas #####
+#################################################################################
+{{- println }}
+{{- end }}
+
+
+
+
+
+{{- /*
+ Breaking changes.
+*/}}
+
+{{- $breaking := "" }}
+{{- $breaking_title := "\n" }}
+{{- $breaking_title = print $breaking_title "\n#################################################################################" }}
+{{- $breaking_title = print $breaking_title "\n###### BREAKING: The config values passed contained no longer accepted #####" }}
+{{- $breaking_title = print $breaking_title "\n###### options. See the messages below for more details. #####" }}
+{{- $breaking_title = print $breaking_title "\n###### #####" }}
+{{- $breaking_title = print $breaking_title "\n###### To verify your updated config is accepted, you can use #####" }}
+{{- $breaking_title = print $breaking_title "\n###### the `helm template` command. #####" }}
+{{- $breaking_title = print $breaking_title "\n#################################################################################" }}
+
+
+{{- /*
+ This is an example (in a helm template comment) on how to detect and
+ communicate with regards to a breaking chart config change.
+
+ {{- if hasKey .Values.singleuser.cloudMetadata "enabled" }}
+ {{- $breaking = print $breaking "\n\nCHANGED: singleuser.cloudMetadata.enabled must as of 1.0.0 be configured using singleuser.cloudMetadata.blockWithIptables with the opposite value." }}
+ {{- end }}
+*/}}
+
+
+{{- if hasKey .Values.rbac "enabled" }}
+{{- $breaking = print $breaking "\n\nCHANGED: rbac.enabled must as of version 2.0.0 be configured via rbac.create and .serviceAccount.create." }}
+{{- end }}
+
+
+{{- if hasKey .Values.hub "fsGid" }}
+{{- $breaking = print $breaking "\n\nCHANGED: hub.fsGid must as of version 2.0.0 be configured via hub.podSecurityContext.fsGroup." }}
+{{- end }}
+
+
+{{- if $breaking }}
+{{- fail (print $breaking_title $breaking "\n\n") }}
+{{- end }}
diff --git a/jupyterhub/templates/_helpers-names.tpl b/jupyterhub/templates/_helpers-names.tpl
new file mode 100644
index 0000000..bbb5864
--- /dev/null
+++ b/jupyterhub/templates/_helpers-names.tpl
@@ -0,0 +1,306 @@
+{{- /*
+ These helpers encapsulate logic on how we name resources. They also enable
+ parent charts to reference these dynamic resource names.
+
+ To avoid duplicating documentation, for more information please see the
+ fullnameOverride entry in schema.yaml or the configuration reference that
+ schema.yaml renders to.
+
+ https://z2jh.jupyter.org/en/latest/resources/reference.html#fullnameOverride
+*/}}
+
+
+
+{{- /*
+ Utility templates
+*/}}
+
+{{- /*
+ Renders to a prefix for the chart's resource names. This prefix is assumed to
+ make the resource name cluster unique.
+*/}}
+{{- define "jupyterhub.fullname" -}}
+ {{- /*
+ We have implemented a trick to allow a parent chart depending on this
+ chart to call these named templates.
+
+ Caveats and notes:
+
+ 1. While parent charts can reference these, grandparent charts can't.
+ 2. Parent charts must not use an alias for this chart.
+ 3. There is no failsafe workaround to above due to
+ https://github.com/helm/helm/issues/9214.
+ 4. .Chart is of its own type (*chart.Metadata) and needs to be cast
+ using "toYaml | fromYaml" in order to be able to use normal helm
+ template functions on it.
+ */}}
+ {{- $fullname_override := .Values.fullnameOverride }}
+ {{- $name_override := .Values.nameOverride }}
+ {{- if ne .Chart.Name "jupyterhub" }}
+ {{- if .Values.jupyterhub }}
+ {{- $fullname_override = .Values.jupyterhub.fullnameOverride }}
+ {{- $name_override = .Values.jupyterhub.nameOverride }}
+ {{- end }}
+ {{- end }}
+
+ {{- if eq (typeOf $fullname_override) "string" }}
+ {{- $fullname_override }}
+ {{- else }}
+ {{- $name := $name_override | default .Chart.Name }}
+ {{- if contains $name .Release.Name }}
+ {{- .Release.Name }}
+ {{- else }}
+ {{- .Release.Name }}-{{ $name }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+
+{{- /*
+ Renders to a blank string or if the fullname template is truthy renders to it
+ with an appended dash.
+*/}}
+{{- define "jupyterhub.fullname.dash" -}}
+ {{- if (include "jupyterhub.fullname" .) }}
+ {{- include "jupyterhub.fullname" . }}-
+ {{- end }}
+{{- end }}
+
+
+
+{{- /*
+ Namespaced resources
+*/}}
+
+{{- /* hub Deployment */}}
+{{- define "jupyterhub.hub.fullname" -}}
+ {{- include "jupyterhub.fullname.dash" . }}hub
+{{- end }}
+
+{{- /* hub-serviceaccount ServiceAccount */}}
+{{- define "jupyterhub.hub-serviceaccount.fullname" -}}
+ {{- if .Values.hub.serviceAccount.create }}
+ {{- .Values.hub.serviceAccount.name | default (include "jupyterhub.hub.fullname" .) }}
+ {{- else }}
+ {{- .Values.hub.serviceAccount.name | default "default" }}
+ {{- end }}
+{{- end }}
+
+{{- /* hub-existing-secret Secret */}}
+{{- define "jupyterhub.hub-existing-secret.fullname" -}}
+ {{- /* A hack to avoid issues from invoking this from a parent Helm chart. */}}
+ {{- $existing_secret := .Values.hub.existingSecret }}
+ {{- if ne .Chart.Name "jupyterhub" }}
+ {{- $existing_secret = .Values.jupyterhub.hub.existingSecret }}
+ {{- end }}
+ {{- if $existing_secret }}
+ {{- $existing_secret }}
+ {{- end }}
+{{- end }}
+
+{{- /* hub-existing-secret-or-default Secret */}}
+{{- define "jupyterhub.hub-existing-secret-or-default.fullname" -}}
+ {{- include "jupyterhub.hub-existing-secret.fullname" . | default (include "jupyterhub.hub.fullname" .) }}
+{{- end }}
+
+{{- /* hub PVC */}}
+{{- define "jupyterhub.hub-pvc.fullname" -}}
+ {{- include "jupyterhub.hub.fullname" . }}-db-dir
+{{- end }}
+
+{{- /* proxy Deployment */}}
+{{- define "jupyterhub.proxy.fullname" -}}
+ {{- include "jupyterhub.fullname.dash" . }}proxy
+{{- end }}
+
+{{- /* proxy-api Service */}}
+{{- define "jupyterhub.proxy-api.fullname" -}}
+ {{- include "jupyterhub.proxy.fullname" . }}-api
+{{- end }}
+
+{{- /* proxy-http Service */}}
+{{- define "jupyterhub.proxy-http.fullname" -}}
+ {{- include "jupyterhub.proxy.fullname" . }}-http
+{{- end }}
+
+{{- /* proxy-public Service */}}
+{{- define "jupyterhub.proxy-public.fullname" -}}
+ {{- include "jupyterhub.proxy.fullname" . }}-public
+{{- end }}
+
+{{- /* proxy-public-tls Secret */}}
+{{- define "jupyterhub.proxy-public-tls.fullname" -}}
+ {{- include "jupyterhub.proxy-public.fullname" . }}-tls-acme
+{{- end }}
+
+{{- /* proxy-public-manual-tls Secret */}}
+{{- define "jupyterhub.proxy-public-manual-tls.fullname" -}}
+ {{- include "jupyterhub.proxy-public.fullname" . }}-manual-tls
+{{- end }}
+
+{{- /* autohttps Deployment */}}
+{{- define "jupyterhub.autohttps.fullname" -}}
+ {{- include "jupyterhub.fullname.dash" . }}autohttps
+{{- end }}
+
+{{- /* autohttps-serviceaccount ServiceAccount */}}
+{{- define "jupyterhub.autohttps-serviceaccount.fullname" -}}
+ {{- if .Values.proxy.traefik.serviceAccount.create }}
+ {{- .Values.proxy.traefik.serviceAccount.name | default (include "jupyterhub.autohttps.fullname" .) }}
+ {{- else }}
+ {{- .Values.proxy.traefik.serviceAccount.name | default "default" }}
+ {{- end }}
+{{- end }}
+
+{{- /* user-scheduler Deployment */}}
+{{- define "jupyterhub.user-scheduler-deploy.fullname" -}}
+ {{- include "jupyterhub.fullname.dash" . }}user-scheduler
+{{- end }}
+
+{{- /* user-scheduler-serviceaccount ServiceAccount */}}
+{{- define "jupyterhub.user-scheduler-serviceaccount.fullname" -}}
+ {{- if .Values.scheduling.userScheduler.serviceAccount.create }}
+ {{- .Values.scheduling.userScheduler.serviceAccount.name | default (include "jupyterhub.user-scheduler-deploy.fullname" .) }}
+ {{- else }}
+ {{- .Values.scheduling.userScheduler.serviceAccount.name | default "default" }}
+ {{- end }}
+{{- end }}
+
+{{- /* user-scheduler leader election lock resource */}}
+{{- define "jupyterhub.user-scheduler-lock.fullname" -}}
+ {{- include "jupyterhub.user-scheduler-deploy.fullname" . }}-lock
+{{- end }}
+
+{{- /* user-placeholder StatefulSet */}}
+{{- define "jupyterhub.user-placeholder.fullname" -}}
+ {{- include "jupyterhub.fullname.dash" . }}user-placeholder
+{{- end }}
+
+{{- /* image-awaiter Job */}}
+{{- define "jupyterhub.hook-image-awaiter.fullname" -}}
+ {{- include "jupyterhub.fullname.dash" . }}hook-image-awaiter
+{{- end }}
+
+{{- /* image-awaiter-serviceaccount ServiceAccount */}}
+{{- define "jupyterhub.hook-image-awaiter-serviceaccount.fullname" -}}
+ {{- if .Values.prePuller.hook.serviceAccount.create }}
+ {{- .Values.prePuller.hook.serviceAccount.name | default (include "jupyterhub.hook-image-awaiter.fullname" .) }}
+ {{- else }}
+ {{- .Values.prePuller.hook.serviceAccount.name | default "default" }}
+ {{- end }}
+{{- end }}
+
+{{- /* hook-image-puller DaemonSet */}}
+{{- define "jupyterhub.hook-image-puller.fullname" -}}
+ {{- include "jupyterhub.fullname.dash" . }}hook-image-puller
+{{- end }}
+
+{{- /* continuous-image-puller DaemonSet */}}
+{{- define "jupyterhub.continuous-image-puller.fullname" -}}
+ {{- include "jupyterhub.fullname.dash" . }}continuous-image-puller
+{{- end }}
+
+{{- /* singleuser NetworkPolicy */}}
+{{- define "jupyterhub.singleuser.fullname" -}}
+ {{- include "jupyterhub.fullname.dash" . }}singleuser
+{{- end }}
+
+{{- /* image-pull-secret Secret */}}
+{{- define "jupyterhub.image-pull-secret.fullname" -}}
+ {{- include "jupyterhub.fullname.dash" . }}image-pull-secret
+{{- end }}
+
+{{- /* Ingress */}}
+{{- define "jupyterhub.ingress.fullname" -}}
+ {{- if (include "jupyterhub.fullname" .) }}
+ {{- include "jupyterhub.fullname" . }}
+ {{- else -}}
+ jupyterhub
+ {{- end }}
+{{- end }}
+
+
+
+{{- /*
+ Cluster wide resources
+
+ We enforce uniqueness of names for our cluster wide resources. We assume that
+ the prefix from setting fullnameOverride to null or a string will be cluster
+ unique.
+*/}}
+
+{{- /* Priority */}}
+{{- define "jupyterhub.priority.fullname" -}}
+ {{- if (include "jupyterhub.fullname" .) }}
+ {{- include "jupyterhub.fullname" . }}
+ {{- else }}
+ {{- .Release.Name }}-default-priority
+ {{- end }}
+{{- end }}
+
+{{- /* user-placeholder Priority */}}
+{{- define "jupyterhub.user-placeholder-priority.fullname" -}}
+ {{- if (include "jupyterhub.fullname" .) }}
+ {{- include "jupyterhub.user-placeholder.fullname" . }}
+ {{- else }}
+ {{- .Release.Name }}-user-placeholder-priority
+ {{- end }}
+{{- end }}
+
+{{- /* image-puller Priority */}}
+{{- define "jupyterhub.image-puller-priority.fullname" -}}
+ {{- if (include "jupyterhub.fullname" .) }}
+ {{- include "jupyterhub.fullname.dash" . }}image-puller
+ {{- else }}
+ {{- .Release.Name }}-image-puller-priority
+ {{- end }}
+{{- end }}
+
+{{- /* user-scheduler's registered name */}}
+{{- define "jupyterhub.user-scheduler.fullname" -}}
+ {{- if (include "jupyterhub.fullname" .) }}
+ {{- include "jupyterhub.user-scheduler-deploy.fullname" . }}
+ {{- else }}
+ {{- .Release.Name }}-user-scheduler
+ {{- end }}
+{{- end }}
+
+
+
+{{- /*
+ A template to render all the named templates in this file for use in the
+ hub's ConfigMap.
+
+ It is important we keep this in sync with the available templates.
+*/}}
+{{- define "jupyterhub.name-templates" -}}
+fullname: {{ include "jupyterhub.fullname" . | quote }}
+fullname-dash: {{ include "jupyterhub.fullname.dash" . | quote }}
+hub: {{ include "jupyterhub.hub.fullname" . | quote }}
+hub-serviceaccount: {{ include "jupyterhub.hub-serviceaccount.fullname" . | quote }}
+hub-existing-secret: {{ include "jupyterhub.hub-existing-secret.fullname" . | quote }}
+hub-existing-secret-or-default: {{ include "jupyterhub.hub-existing-secret-or-default.fullname" . | quote }}
+hub-pvc: {{ include "jupyterhub.hub-pvc.fullname" . | quote }}
+proxy: {{ include "jupyterhub.proxy.fullname" . | quote }}
+proxy-api: {{ include "jupyterhub.proxy-api.fullname" . | quote }}
+proxy-http: {{ include "jupyterhub.proxy-http.fullname" . | quote }}
+proxy-public: {{ include "jupyterhub.proxy-public.fullname" . | quote }}
+proxy-public-tls: {{ include "jupyterhub.proxy-public-tls.fullname" . | quote }}
+proxy-public-manual-tls: {{ include "jupyterhub.proxy-public-manual-tls.fullname" . | quote }}
+autohttps: {{ include "jupyterhub.autohttps.fullname" . | quote }}
+autohttps-serviceaccount: {{ include "jupyterhub.autohttps-serviceaccount.fullname" . | quote }}
+user-scheduler-deploy: {{ include "jupyterhub.user-scheduler-deploy.fullname" . | quote }}
+user-scheduler-serviceaccount: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . | quote }}
+user-scheduler-lock: {{ include "jupyterhub.user-scheduler-lock.fullname" . | quote }}
+user-placeholder: {{ include "jupyterhub.user-placeholder.fullname" . | quote }}
+image-puller-priority: {{ include "jupyterhub.image-puller-priority.fullname" . | quote }}
+hook-image-awaiter: {{ include "jupyterhub.hook-image-awaiter.fullname" . | quote }}
+hook-image-awaiter-serviceaccount: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . | quote }}
+hook-image-puller: {{ include "jupyterhub.hook-image-puller.fullname" . | quote }}
+continuous-image-puller: {{ include "jupyterhub.continuous-image-puller.fullname" . | quote }}
+singleuser: {{ include "jupyterhub.singleuser.fullname" . | quote }}
+image-pull-secret: {{ include "jupyterhub.image-pull-secret.fullname" . | quote }}
+ingress: {{ include "jupyterhub.ingress.fullname" . | quote }}
+priority: {{ include "jupyterhub.priority.fullname" . | quote }}
+user-placeholder-priority: {{ include "jupyterhub.user-placeholder-priority.fullname" . | quote }}
+user-scheduler: {{ include "jupyterhub.user-scheduler.fullname" . | quote }}
+{{- end }}
diff --git a/jupyterhub/templates/_helpers-netpol.tpl b/jupyterhub/templates/_helpers-netpol.tpl
new file mode 100644
index 0000000..5adbd3d
--- /dev/null
+++ b/jupyterhub/templates/_helpers-netpol.tpl
@@ -0,0 +1,86 @@
+{{- /*
+ This named template renders egress rules for NetworkPolicy resources based on
+ common configuration.
+
+ It renders based on the `egressAllowRules` and `egress` keys of the
+ passed networkPolicy config object. Each flag set to true under
+ `egressAllowRules` is rendered to an egress rule placed next to any custom
+ user defined rules from the `egress` config.
+
+ This named template needs to render based on a specific networkPolicy
+ resource, but also needs access to the root context. Due to that, it
+ accepts a list as its scope, where the first element is supposed to be the
+ root context and the second element is supposed to be the networkPolicy
+ configuration object.
+
+ As an example, this is how you would render this named template from a
+ NetworkPolicy resource under its egress:
+
+ egress:
+ # other rules here...
+
+ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.hub.networkPolicy)) }}
+ {{- . | nindent 4 }}
+ {{- end }}
+
+ Note that the references to privateIPs and nonPrivateIPs relate to
+ https://en.wikipedia.org/wiki/Private_network#Private_IPv4_addresses.
+*/}}
+
+{{- define "jupyterhub.networkPolicy.renderEgressRules" -}}
+{{- $root := index . 0 }}
+{{- $netpol := index . 1 }}
+{{- if $netpol.egressAllowRules.dnsPortsPrivateIPs }}
+# Allow outbound connections to the DNS port in the private IP ranges
+- ports:
+ - protocol: UDP
+ port: 53
+ - protocol: TCP
+ port: 53
+ to:
+ - ipBlock:
+ cidr: 10.0.0.0/8
+ - ipBlock:
+ cidr: 172.16.0.0/12
+ - ipBlock:
+ cidr: 192.168.0.0/16
+{{- end }}
+
+{{- if $netpol.egressAllowRules.nonPrivateIPs }}
+# Allow outbound connections to non-private IP ranges
+- to:
+ - ipBlock:
+ cidr: 0.0.0.0/0
+ except:
+ # As part of this rule, don't:
+ # - allow outbound connections to private IP
+ - 10.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+ # - allow outbound connections to the cloud metadata server
+ - {{ $root.Values.singleuser.cloudMetadata.ip }}/32
+{{- end }}
+
+{{- if $netpol.egressAllowRules.privateIPs }}
+# Allow outbound connections to private IP ranges
+- to:
+ - ipBlock:
+ cidr: 10.0.0.0/8
+ - ipBlock:
+ cidr: 172.16.0.0/12
+ - ipBlock:
+ cidr: 192.168.0.0/16
+{{- end }}
+
+{{- if $netpol.egressAllowRules.cloudMetadataServer }}
+# Allow outbound connections to the cloud metadata server
+- to:
+ - ipBlock:
+ cidr: {{ $root.Values.singleuser.cloudMetadata.ip }}/32
+{{- end }}
+
+{{- with $netpol.egress }}
+# Allow outbound connections based on user specified rules
+{{ . | toYaml }}
+{{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/_helpers.tpl b/jupyterhub/templates/_helpers.tpl
new file mode 100644
index 0000000..5cc5e6d
--- /dev/null
+++ b/jupyterhub/templates/_helpers.tpl
@@ -0,0 +1,402 @@
+{{- /*
+ ## About
+ This file contains helpers to systematically name, label and select Kubernetes
+ objects we define in the .yaml template files.
+
+
+ ## How helpers work
+ Helm helper functions are a good way to avoid repeating yourself. They
+ generate output based on a single dictionary of input that we call the
+ helper's scope. When you are in helm, you access your current scope with a
+ single punctuation character (.).
+
+ When you ask a helper to render its content, one often forwards the current
+ scope to the helper in order to allow it to access .Release.Name,
+ .Values.rbac.create and similar values.
+
+ #### Example - Passing the current scope
+ {{ include "jupyterhub.commonLabels" . }}
+
+ It would be possible to pass something specific instead of the current scope
+ (.), but that would make .Release.Name etc. inaccessible by the helper which
+ is something we aim to avoid.
+
+ #### Example - Passing a new scope
+ {{ include "demo.bananaPancakes" (dict "pancakes" 5 "bananas" 3) }}
+
+ To let a helper access the current scope along with additional values we have
+ opted to create a dictionary containing the additional values, which is then
+ populated with values from the current scope through the merge function.
+
+ #### Example - Passing a new scope augmented with the old
+ {{- $_ := merge (dict "appLabel" "kube-lego") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 6 }}
+
+ In this way, the code within the definition of `jupyterhub.matchLabels` will
+ be able to access .Release.Name and .appLabel.
+
+ NOTE:
+ The ordering of merge is crucial, the latter argument is merged into the
+ former. So if you would swap the order you would influence the current scope
+ risking unintentional behavior. Therefore, always put the fresh unreferenced
+ dictionary (dict "key1" "value1") first and the current scope (.) last.
+
+
+ ## Declared helpers
+ - appLabel |
+ - componentLabel |
+ - commonLabels | uses appLabel
+ - labels | uses commonLabels
+ - matchLabels | uses labels
+ - podCullerSelector | uses matchLabels
+
+
+ ## Example usage
+ ```yaml
+ # Excerpt from proxy/autohttps/deployment.yaml
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: {{ include "jupyterhub.autohttps.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ spec:
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" $_ | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "jupyterhub.labels" $_ | nindent 8 }}
+ hub.jupyter.org/network-access-proxy-http: "true"
+ ```
+
+ NOTE:
+ The "jupyterhub.matchLabels" and "jupyterhub.labels" is passed an augmented
+ scope that will influence the helpers' behavior. It get the current scope
+ "." but merged with a dictionary containing extra key/value pairs. In this
+ case the "." scope was merged with a small dictionary containing only one
+ key/value pair "appLabel: kube-lego". It is required for kube-lego to
+ function properly. It is a way to override the default app label's value.
+*/}}
+
+
+{{- /*
+ jupyterhub.appLabel:
+ Used by "jupyterhub.labels".
+*/}}
+{{- define "jupyterhub.appLabel" -}}
+{{ .Values.nameOverride | default .Chart.Name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+
+{{- /*
+ jupyterhub.componentLabel:
+ Used by "jupyterhub.labels".
+
+ NOTE: The component label is determined by either...
+ - 1: The provided scope's .componentLabel
+ - 2: The template's filename if living in the root folder
+ - 3: The template parent folder's name
+ - : ...and is combined with .componentPrefix and .componentSuffix
+*/}}
+{{- define "jupyterhub.componentLabel" -}}
+{{- $file := .Template.Name | base | trimSuffix ".yaml" -}}
+{{- $parent := .Template.Name | dir | base | trimPrefix "templates" -}}
+{{- $component := .componentLabel | default $parent | default $file -}}
+{{- $component := print (.componentPrefix | default "") $component (.componentSuffix | default "") -}}
+{{ $component }}
+{{- end }}
+
+
+{{- /*
+ jupyterhub.commonLabels:
+ Foundation for "jupyterhub.labels".
+ Provides labels: app, release, (chart and heritage).
+*/}}
+{{- define "jupyterhub.commonLabels" -}}
+app: {{ .appLabel | default (include "jupyterhub.appLabel" .) }}
+release: {{ .Release.Name }}
+{{- if not .matchLabels }}
+chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+heritage: {{ .heritageLabel | default .Release.Service }}
+{{- end }}
+{{- end }}
+
+
+{{- /*
+ jupyterhub.labels:
+ Provides labels: component, app, release, (chart and heritage).
+*/}}
+{{- define "jupyterhub.labels" -}}
+component: {{ include "jupyterhub.componentLabel" . }}
+{{ include "jupyterhub.commonLabels" . }}
+{{- end }}
+
+
+{{- /*
+ jupyterhub.matchLabels:
+ Used to provide pod selection labels: component, app, release.
+*/}}
+{{- define "jupyterhub.matchLabels" -}}
+{{- $_ := merge (dict "matchLabels" true) . -}}
+{{ include "jupyterhub.labels" $_ }}
+{{- end }}
+
+
+{{- /*
+ jupyterhub.dockerconfigjson:
+ Creates a base64 encoded docker registry json blob for use in an image pull
+ secret, just like the `kubectl create secret docker-registry` command does
+ for the generated secret's data.dockerconfigjson field. The output is
+ verified to be exactly the same even if you have a password spanning
+ multiple lines, as you may need when using a private GCR registry.
+
+ - https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+*/}}
+{{- define "jupyterhub.dockerconfigjson" -}}
+{{ include "jupyterhub.dockerconfigjson.yaml" . | b64enc }}
+{{- end }}
+
+{{- define "jupyterhub.dockerconfigjson.yaml" -}}
+{{- with .Values.imagePullSecret -}}
+{
+ "auths": {
+ {{ .registry | default "https://index.docker.io/v1/" | quote }}: {
+ "username": {{ .username | quote }},
+ "password": {{ .password | quote }},
+ {{- if .email }}
+ "email": {{ .email | quote }},
+ {{- end }}
+ "auth": {{ (print .username ":" .password) | b64enc | quote }}
+ }
+ }
+}
+{{- end }}
+{{- end }}
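
A rough Python equivalent of what this template pair emits, handy for sanity-checking the double base64 encoding (a sketch, not part of the chart):

    import base64
    import json

    def dockerconfigjson(username, password,
                         registry="https://index.docker.io/v1/", email=None):
        # the "auth" field is base64("username:password"), like kubectl produces
        entry = {
            "username": username,
            "password": password,
            "auth": base64.b64encode(f"{username}:{password}".encode()).decode(),
        }
        if email:
            entry["email"] = email
        blob = json.dumps({"auths": {registry: entry}})
        # k8s Secret data fields are base64 encoded once more
        return base64.b64encode(blob.encode()).decode()
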
+
+{{- /*
+ jupyterhub.imagePullSecrets
+ Augments passed .pullSecrets with $.Values.imagePullSecrets
+*/}}
+{{- define "jupyterhub.imagePullSecrets" -}}
+ {{- /*
+ We have implemented a trick to allow a parent chart depending on this
+ chart to call this named template.
+
+ Caveats and notes:
+
+ 1. While parent charts can reference these, grandparent charts can't.
+ 2. Parent charts must not use an alias for this chart.
+ 3. There is no failsafe workaround to above due to
+ https://github.com/helm/helm/issues/9214.
+ 4. .Chart is of its own type (*chart.Metadata) and needs to be cast
+ using "toYaml | fromYaml" in order to be able to use normal helm
+ template functions on it.
+ */}}
+ {{- $jupyterhub_values := .root.Values }}
+ {{- if ne .root.Chart.Name "jupyterhub" }}
+ {{- if .root.Values.jupyterhub }}
+ {{- $jupyterhub_values = .root.Values.jupyterhub }}
+ {{- end }}
+ {{- end }}
+
+ {{- /* Populate $_.list with all relevant entries */}}
+ {{- $_ := dict "list" (concat .image.pullSecrets $jupyterhub_values.imagePullSecrets | uniq) }}
+ {{- if and $jupyterhub_values.imagePullSecret.create $jupyterhub_values.imagePullSecret.automaticReferenceInjection }}
+ {{- $__ := set $_ "list" (append $_.list (include "jupyterhub.image-pull-secret.fullname" .root) | uniq) }}
+ {{- end }}
+
+ {{- /* Decide if something should be written */}}
+ {{- if not (eq ($_.list | toJson) "[]") }}
+
+ {{- /* Process $_.list into $_.res, where string entries become dicts with a
+ name key whose value is the string */}}
+ {{- $_ := set $_ "res" list }}
+ {{- range $_.list }}
+ {{- if eq (typeOf .) "string" }}
+ {{- $__ := set $_ "res" (append $_.res (dict "name" .)) }}
+ {{- else }}
+ {{- $__ := set $_ "res" (append $_.res .) }}
+ {{- end }}
+ {{- end }}
+
+ {{- /* Write the results */}}
+ {{- $_.res | toJson }}
+
+ {{- end }}
+{{- end }}
+
+{{- /*
+ jupyterhub.singleuser.resources:
+ The resource request of a singleuser.
+*/}}
+{{- define "jupyterhub.singleuser.resources" -}}
+{{- $r1 := .Values.singleuser.cpu.guarantee -}}
+{{- $r2 := .Values.singleuser.memory.guarantee -}}
+{{- $r3 := .Values.singleuser.extraResource.guarantees -}}
+{{- $r := or $r1 $r2 $r3 -}}
+{{- $l1 := .Values.singleuser.cpu.limit -}}
+{{- $l2 := .Values.singleuser.memory.limit -}}
+{{- $l3 := .Values.singleuser.extraResource.limits -}}
+{{- $l := or $l1 $l2 $l3 -}}
+{{- if $r -}}
+requests:
+ {{- if $r1 }}
+ cpu: {{ .Values.singleuser.cpu.guarantee }}
+ {{- end }}
+ {{- if $r2 }}
+ memory: {{ .Values.singleuser.memory.guarantee }}
+ {{- end }}
+ {{- if $r3 }}
+ {{- range $key, $value := .Values.singleuser.extraResource.guarantees }}
+ {{ $key | quote }}: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+
+{{- if $l }}
+limits:
+ {{- if $l1 }}
+ cpu: {{ .Values.singleuser.cpu.limit }}
+ {{- end }}
+ {{- if $l2 }}
+ memory: {{ .Values.singleuser.memory.limit }}
+ {{- end }}
+ {{- if $l3 }}
+ {{- range $key, $value := .Values.singleuser.extraResource.limits }}
+ {{ $key | quote }}: {{ $value | quote }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+{{- end }}
+
+{{- /*
+ jupyterhub.extraEnv:
+ Output YAML formatted EnvVar entries for use in a containers env field.
+*/}}
+{{- define "jupyterhub.extraEnv" -}}
+{{- include "jupyterhub.extraEnv.withTrailingNewLine" . | trimSuffix "\n" }}
+{{- end }}
+
+{{- define "jupyterhub.extraEnv.withTrailingNewLine" -}}
+{{- if . }}
+{{- /* If extraEnv is a list, we inject it as it is. */}}
+{{- if eq (typeOf .) "[]interface {}" }}
+{{- . | toYaml }}
+
+{{- /* If extraEnv is a map, we differentiate two cases: */}}
+{{- else if eq (typeOf .) "map[string]interface {}" }}
+{{- range $key, $value := . }}
+{{- /*
+ - If extraEnv.someKey has a map value, then we add the value as a YAML
+ parsed list element and use the key as the name value unless it's
+ explicitly set.
+*/}}
+{{- if eq (typeOf $value) "map[string]interface {}" }}
+{{- merge (dict) $value (dict "name" $key) | list | toYaml | println }}
+{{- /*
+ - If extraEnv.someKey has a string value, then we use the key as the
+ environment variable name for the value.
+*/}}
+{{- else if eq (typeOf $value) "string" -}}
+- name: {{ $key | quote }}
+ value: {{ $value | quote | println }}
+{{- else }}
+{{- printf "?.extraEnv.%s had an unexpected type (%s)" $key (typeOf $value) | fail }}
+{{- end }}
+{{- end }} {{- /* end of range */}}
+{{- end }}
+{{- end }} {{- /* end of: if . */}}
+{{- end }} {{- /* end of definition */}}
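
The list-or-map normalization above can be mirrored in Python roughly as follows (a sketch under the same assumptions: lists pass through, map values are strings or dicts):

    def render_extra_env(extra_env):
        if isinstance(extra_env, list):
            return extra_env  # lists are injected as they are
        rendered = []
        for key, value in extra_env.items():
            if isinstance(value, dict):
                # the key is a default "name", overridable by the value itself
                rendered.append({"name": key, **value})
            elif isinstance(value, str):
                rendered.append({"name": key, "value": value})
            else:
                raise TypeError(f"extraEnv.{key} had an unexpected type")
        return rendered
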
+
+{{- /*
+ jupyterhub.extraFiles.data:
+ Renders content for a k8s Secret's data field, coming from extraFiles with
+ binaryData entries.
+*/}}
+{{- define "jupyterhub.extraFiles.data.withNewLineSuffix" -}}
+ {{- range $file_key, $file_details := . }}
+ {{- include "jupyterhub.extraFiles.validate-file" (list $file_key $file_details) }}
+ {{- if $file_details.binaryData }}
+ {{- $file_key | quote }}: {{ $file_details.binaryData | nospace | quote }}{{ println }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+{{- define "jupyterhub.extraFiles.data" -}}
+ {{- include "jupyterhub.extraFiles.data.withNewLineSuffix" . | trimSuffix "\n" }}
+{{- end }}
+
+{{- /*
+ jupyterhub.extraFiles.stringData:
+ Renders content for a k8s Secret's stringData field, coming from extraFiles
+ with either data or stringData entries.
+*/}}
+{{- define "jupyterhub.extraFiles.stringData.withNewLineSuffix" -}}
+ {{- range $file_key, $file_details := . }}
+ {{- include "jupyterhub.extraFiles.validate-file" (list $file_key $file_details) }}
+ {{- $file_name := $file_details.mountPath | base }}
+ {{- if $file_details.stringData }}
+ {{- $file_key | quote }}: |
+ {{- $file_details.stringData | trimSuffix "\n" | nindent 2 }}{{ println }}
+ {{- end }}
+ {{- if $file_details.data }}
+ {{- $file_key | quote }}: |
+ {{- if or (eq (ext $file_name) ".yaml") (eq (ext $file_name) ".yml") }}
+ {{- $file_details.data | toYaml | nindent 2 }}{{ println }}
+ {{- else if eq (ext $file_name) ".json" }}
+ {{- $file_details.data | toJson | nindent 2 }}{{ println }}
+ {{- else if eq (ext $file_name) ".toml" }}
+ {{- $file_details.data | toToml | trimSuffix "\n" | nindent 2 }}{{ println }}
+ {{- else }}
+ {{- print "\n\nextraFiles entries with 'data' (" $file_key " > " $file_details.mountPath ") needs to have a filename extension of .yaml, .yml, .json, or .toml!" | fail }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+{{- define "jupyterhub.extraFiles.stringData" -}}
+ {{- include "jupyterhub.extraFiles.stringData.withNewLineSuffix" . | trimSuffix "\n" }}
+{{- end }}
+
+{{- define "jupyterhub.extraFiles.validate-file" -}}
+ {{- $file_key := index . 0 }}
+ {{- $file_details := index . 1 }}
+
+ {{- /* Use of mountPath. */}}
+ {{- if not ($file_details.mountPath) }}
+ {{- print "\n\nextraFiles entries (" $file_key ") must contain the field 'mountPath'." | fail }}
+ {{- end }}
+
+ {{- /* Use one of stringData, binaryData, data. */}}
+ {{- $field_count := 0 }}
+ {{- if $file_details.data }}
+ {{- $field_count = add1 $field_count }}
+ {{- end }}
+ {{- if $file_details.stringData }}
+ {{- $field_count = add1 $field_count }}
+ {{- end }}
+ {{- if $file_details.binaryData }}
+ {{- $field_count = add1 $field_count }}
+ {{- end }}
+ {{- if ne $field_count 1 }}
+ {{- print "\n\nextraFiles entries (" $file_key ") must only contain one of the fields: 'data', 'stringData', and 'binaryData'." | fail }}
+ {{- end }}
+{{- end }}
+
+{{- /*
+ jupyterhub.chart-version-to-git-ref:
+ Renders a valid git reference from a chartpress generated version string.
+ In practice, either a git tag or a git commit hash will be returned.
+
+ - The version string will follow a chartpress pattern, see
+ https://github.com/jupyterhub/chartpress#examples-chart-versions-and-image-tags.
+
+ - The regexReplaceAll function is a sprig library function, see
+ https://masterminds.github.io/sprig/strings.html.
+
+ - The regular expression is in golang syntax, but \d had to become \\d for
+ example.
+*/}}
+{{- define "jupyterhub.chart-version-to-git-ref" -}}
+{{- regexReplaceAll ".*[.-]n\\d+[.]h(.*)" . "${1}" }}
+{{- end }}
diff --git a/jupyterhub/templates/hub/_helpers-passwords.tpl b/jupyterhub/templates/hub/_helpers-passwords.tpl
new file mode 100644
index 0000000..83edf70
--- /dev/null
+++ b/jupyterhub/templates/hub/_helpers-passwords.tpl
@@ -0,0 +1,92 @@
+{{- /*
+ This file contains logic to look up already
+ generated passwords or generate new ones.
+
+ proxy.secretToken / hub.config.ConfigurableHTTPProxy.auth_token
+ hub.cookieSecret / hub.config.JupyterHub.cookie_secret
+ auth.state.cryptoKey* / hub.config.CryptKeeper.keys
+
+ *Note that the entire auth section is deprecated and users
+ are forced through "fail" in NOTES.txt to migrate to hub.config.
+
+ Note that the lookup logic returns a falsy value when run with
+ `helm diff upgrade`, so it is a bit troublesome to test.
+*/}}
+
+{{- /*
+ Returns a given number of random hex characters.
+
+ - randNumeric 4 | atoi generates a random number in [0, 10^4)
+ This is a range evenly divisible by 16, but even if off by one,
+ that last partial interval offsetting randomness is only 1 part in 625.
+ - mod N 16 maps to the range 0-15
+ - printf "%x" represents a single number 0-15 as a single hex character
+*/}}
+{{- define "jupyterhub.randHex" -}}
+ {{- $result := "" }}
+ {{- range $i := until . }}
+ {{- $rand_hex_char := mod (randNumeric 4 | atoi) 16 | printf "%x" }}
+ {{- $result = print $result $rand_hex_char }}
+ {{- end }}
+ {{- $result }}
+{{- end }}
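
The same construction in Python makes the uniformity argument concrete: 10^4 = 625 * 16, so the mod-16 reduction maps evenly onto the 16 hex digits (illustrative sketch):

    import random

    def rand_hex(n):
        # one random number in [0, 10^4), reduced mod 16, per hex character
        return "".join(format(random.randrange(10**4) % 16, "x") for _ in range(n))
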
+
+{{- define "jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token" -}}
+ {{- if (.Values.hub.config | dig "ConfigurableHTTPProxy" "auth_token" "") }}
+ {{- .Values.hub.config.ConfigurableHTTPProxy.auth_token }}
+ {{- else if .Values.proxy.secretToken }}
+ {{- .Values.proxy.secretToken }}
+ {{- else }}
+ {{- $k8s_state := lookup "v1" "Secret" .Release.Namespace (include "jupyterhub.hub.fullname" .) | default (dict "data" (dict)) }}
+ {{- if hasKey $k8s_state.data "hub.config.ConfigurableHTTPProxy.auth_token" }}
+ {{- index $k8s_state.data "hub.config.ConfigurableHTTPProxy.auth_token" | b64dec }}
+ {{- else }}
+ {{- randAlphaNum 64 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+
+{{- define "jupyterhub.hub.config.JupyterHub.cookie_secret" -}}
+ {{- if (.Values.hub.config | dig "JupyterHub" "cookie_secret" "") }}
+ {{- .Values.hub.config.JupyterHub.cookie_secret }}
+ {{- else if .Values.hub.cookieSecret }}
+ {{- .Values.hub.cookieSecret }}
+ {{- else }}
+ {{- $k8s_state := lookup "v1" "Secret" .Release.Namespace (include "jupyterhub.hub.fullname" .) | default (dict "data" (dict)) }}
+ {{- if hasKey $k8s_state.data "hub.config.JupyterHub.cookie_secret" }}
+ {{- index $k8s_state.data "hub.config.JupyterHub.cookie_secret" | b64dec }}
+ {{- else }}
+ {{- include "jupyterhub.randHex" 64 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+
+{{- define "jupyterhub.hub.config.CryptKeeper.keys" -}}
+ {{- if (.Values.hub.config | dig "CryptKeeper" "keys" "") }}
+ {{- .Values.hub.config.CryptKeeper.keys | join ";" }}
+ {{- else }}
+ {{- $k8s_state := lookup "v1" "Secret" .Release.Namespace (include "jupyterhub.hub.fullname" .) | default (dict "data" (dict)) }}
+ {{- if hasKey $k8s_state.data "hub.config.CryptKeeper.keys" }}
+ {{- index $k8s_state.data "hub.config.CryptKeeper.keys" | b64dec }}
+ {{- else }}
+ {{- include "jupyterhub.randHex" 64 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+
+{{- define "jupyterhub.hub.services.get_api_token" -}}
+ {{- $_ := index . 0 }}
+ {{- $service_key := index . 1 }}
+ {{- $explicitly_set_api_token := or ($_.Values.hub.services | dig $service_key "api_token" "") ($_.Values.hub.services | dig $service_key "apiToken" "") }}
+ {{- if $explicitly_set_api_token }}
+ {{- $explicitly_set_api_token }}
+ {{- else }}
+ {{- $k8s_state := lookup "v1" "Secret" $_.Release.Namespace (include "jupyterhub.hub.fullname" $_) | default (dict "data" (dict)) }}
+ {{- $k8s_secret_key := print "hub.services." $service_key ".apiToken" }}
+ {{- if hasKey $k8s_state.data $k8s_secret_key }}
+ {{- index $k8s_state.data $k8s_secret_key | b64dec }}
+ {{- else }}
+ {{- include "jupyterhub.randHex" 64 }}
+ {{- end }}
+ {{- end }}
+{{- end }}
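
The lookup-or-generate precedence shared by these templates reduces to a short chain; roughly, in Python (a sketch; secrets.token_hex stands in for randAlphaNum/randHex):

    import secrets

    def resolve_token(chart_value=None, secret_value=None):
        # explicit chart config wins, then the value already stored in the
        # chart's k8s Secret, then a freshly generated random token
        return chart_value or secret_value or secrets.token_hex(32)
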
diff --git a/jupyterhub/templates/hub/configmap-theme.yaml b/jupyterhub/templates/hub/configmap-theme.yaml
new file mode 100644
index 0000000..8d744b7
--- /dev/null
+++ b/jupyterhub/templates/hub/configmap-theme.yaml
@@ -0,0 +1,22 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: {{ include "jupyterhub.hub.fullname" . }}-theme
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+data:
+ {{- /*
+ Glob files to allow them to be mounted by the hub pod
+
+ page.html: |
+ multi line string content...
+ spawn.html: |
+ multi line string content...
+ */}}
+ {{- (.Files.Glob "files/theme/*").AsConfig | nindent 2 }}
+
+ {{- /*
+ Store away a checksum of the hook-image-puller daemonset so future upgrades
+ can compare and decide if it should run or not using the `lookup` function.
+ */}}
+ checksum_hook-image-puller: {{ include "jupyterhub.imagePuller.daemonset.hook.checksum" . | quote }}
diff --git a/jupyterhub/templates/hub/configmap.yaml b/jupyterhub/templates/hub/configmap.yaml
new file mode 100644
index 0000000..128a6a0
--- /dev/null
+++ b/jupyterhub/templates/hub/configmap.yaml
@@ -0,0 +1,30 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: {{ include "jupyterhub.hub.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+data:
+ {{- /*
+ Resource names are exposed here so they can be reliably referenced.
+
+ user-scheduler: "my-helm-release-user-scheduler"
+ ...
+ */}}
+ {{- include "jupyterhub.name-templates" . | nindent 2 }}
+
+ {{- /*
+ Glob files to allow them to be mounted by the hub pod
+
+ jupyterhub_config: |
+ multi line string content...
+ z2jh.py: |
+ multi line string content...
+ */}}
+ {{- (.Files.Glob "files/hub/*").AsConfig | nindent 2 }}
+
+ {{- /*
+ Store away a checksum of the hook-image-puller daemonset so future upgrades
+ can compare and decide if it should run or not using the `lookup` function.
+ */}}
+ checksum_hook-image-puller: {{ include "jupyterhub.imagePuller.daemonset.hook.checksum" . | quote }}
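
The short names written into this ConfigMap are what z2jh.get_name and z2jh.get_name_env (added earlier in this diff) resolve at runtime from the mounted /usr/local/etc/jupyterhub/config/ directory; for example (illustrative values):

    from z2jh import get_name, get_name_env

    proxy_service = get_name("proxy-public")   # e.g. "my-release-proxy-public"
    # looks up e.g. MY_RELEASE_PROXY_PUBLIC_SERVICE_PORT in the environment
    proxy_port = get_name_env("proxy-public", "_SERVICE_PORT")
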
diff --git a/jupyterhub/templates/hub/deployment.yaml b/jupyterhub/templates/hub/deployment.yaml
new file mode 100644
index 0000000..dd46d2f
--- /dev/null
+++ b/jupyterhub/templates/hub/deployment.yaml
@@ -0,0 +1,252 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "jupyterhub.hub.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ {{- if typeIs "int" .Values.hub.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.hub.revisionHistoryLimit }}
+ {{- end }}
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ strategy:
+ {{- .Values.hub.deploymentStrategy | toYaml | nindent 4 }}
+ template:
+ metadata:
+ labels:
+ {{- /* Changes here will cause the Deployment to restart the pods. */}}
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ hub.jupyter.org/network-access-proxy-api: "true"
+ hub.jupyter.org/network-access-proxy-http: "true"
+ hub.jupyter.org/network-access-singleuser: "true"
+ {{- with .Values.hub.labels }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ annotations:
+ {{- /* This lets us autorestart when the secret changes! */}}
+ checksum/configmap: {{ include (print .Template.BasePath "/hub/configmap.yaml") . | sha256sum }}
+ checksum/theme: {{ include (print .Template.BasePath "/hub/configmap-theme.yaml") . | sha256sum }}
+ checksum/secret: {{ include (print .Template.BasePath "/hub/secret.yaml") . | sha256sum }}
+ {{- with .Values.hub.annotations }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
+ {{- end }}
+ {{- with .Values.hub.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- with concat .Values.scheduling.corePods.tolerations .Values.hub.tolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
+ volumes:
+ - name: theme
+ configMap:
+ name: {{ include "jupyterhub.hub.fullname" . }}-theme
+ - name: config
+ configMap:
+ name: {{ include "jupyterhub.hub.fullname" . }}
+ - name: secret
+ secret:
+ secretName: {{ include "jupyterhub.hub.fullname" . }}
+ {{- with (include "jupyterhub.hub-existing-secret.fullname" .) }}
+ - name: existing-secret
+ secret:
+ secretName: {{ . }}
+ {{- end }}
+ {{- if .Values.hub.extraFiles }}
+ - name: files
+ secret:
+ secretName: {{ include "jupyterhub.hub.fullname" . }}
+ items:
+ {{- range $file_key, $file_details := .Values.hub.extraFiles }}
+ - key: {{ $file_key | quote }}
+ path: {{ $file_key | quote }}
+ {{- with $file_details.mode }}
+ mode: {{ . }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.hub.extraVolumes }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- if eq .Values.hub.db.type "sqlite-pvc" }}
+ - name: pvc
+ persistentVolumeClaim:
+ claimName: {{ include "jupyterhub.hub-pvc.fullname" . }}
+ {{- end }}
+ {{- with include "jupyterhub.hub-serviceaccount.fullname" . }}
+ serviceAccountName: {{ . }}
+ {{- end }}
+ {{- with .Values.hub.podSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.hub.image) }}
+ imagePullSecrets: {{ . }}
+ {{- end }}
+ {{- with .Values.hub.initContainers }}
+ initContainers:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ containers:
+ {{- with .Values.hub.extraContainers }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ - name: hub
+ image: {{ .Values.hub.image.name }}:{{ .Values.hub.image.tag }}
+ {{- with .Values.hub.command }}
+ command:
+ {{- range . }}
+ - {{ tpl . $ }}
+ {{- end }}
+ {{- end }}
+ args:
+ {{- /* .Values.hub.args overrides everything the Helm chart otherwise would set */}}
+ {{- if .Values.hub.args }}
+ {{- range .Values.hub.args }}
+ - {{ tpl . $ }}
+ {{- end }}
+
+ {{- /* .Values.hub.args didn't replace the default logic */}}
+ {{- else }}
+ - jupyterhub
+ - --config
+ - /usr/local/etc/jupyterhub/jupyterhub_config.py
+ {{- if .Values.debug.enabled }}
+ - --debug
+ {{- end }}
+ {{- /* NOTE:
+ We want to do automatic upgrades for sqlite-pvc by default, but
+ allow users to opt out of that if they want. Users using their own
+ db need to 'opt in'. Go templates treat nil, "" and false as
+ 'false', making this code complex. We can probably make this a
+ one-liner, but doing combinations of boolean vars in go templates
+ is very inelegant & hard to reason about.
+ */}}
+ {{- $upgradeType := typeOf .Values.hub.db.upgrade }}
+ {{- if eq $upgradeType "bool" }}
+ {{- /* .Values.hub.db.upgrade has been explicitly set to true or false */}}
+ {{- if .Values.hub.db.upgrade }}
+ - --upgrade-db
+ {{- end }}
+ {{- else if eq $upgradeType "" }}
+ {{- /* .Values.hub.db.upgrade is nil */}}
+ {{- if eq .Values.hub.db.type "sqlite-pvc" }}
+ - --upgrade-db
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ volumeMounts:
+ - mountPath: /usr/local/etc/jupyterhub/config.yml
+ subPath: config.yml
+ name: config
+ - mountPath: /usr/local/etc/jupyterhub/jupyterhub_config.py
+ subPath: jupyterhub_config.py
+ name: config
+ - mountPath: /usr/local/etc/jupyterhub/z2jh.py
+ subPath: z2jh.py
+ name: config
+ - mountPath: /usr/local/etc/jupyterhub/config/
+ name: config
+ - mountPath: /opt/jupyterhub/template/
+ name: theme
+ - mountPath: /usr/local/etc/jupyterhub/secret/
+ name: secret
+ {{- if (include "jupyterhub.hub-existing-secret.fullname" .) }}
+ - mountPath: /usr/local/etc/jupyterhub/existing-secret/
+ name: existing-secret
+ {{- end }}
+ {{- range $file_key, $file_details := .Values.hub.extraFiles }}
+ - mountPath: {{ $file_details.mountPath }}
+ subPath: {{ $file_key | quote }}
+ name: files
+ {{- end }}
+ {{- with .Values.hub.extraVolumeMounts }}
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if eq .Values.hub.db.type "sqlite-pvc" }}
+ - mountPath: /srv/jupyterhub
+ name: pvc
+ {{- with .Values.hub.db.pvc.subPath }}
+ subPath: {{ . | quote }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.hub.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with .Values.hub.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ {{- with .Values.hub.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with .Values.hub.lifecycle }}
+ lifecycle:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ env:
+ - name: PYTHONUNBUFFERED
+ value: "1"
+ - name: HELM_RELEASE_NAME
+ value: {{ .Release.Name | quote }}
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIGPROXY_AUTH_TOKEN
+ valueFrom:
+ secretKeyRef:
+ {{- /* NOTE:
+ References the chart managed k8s Secret even if
+ hub.existingSecret is specified to avoid using the lookup
+ function on the user managed k8s Secret, which is assumed
+ not to be possible.
+ */}}
+ name: {{ include "jupyterhub.hub.fullname" . }}
+ key: hub.config.ConfigurableHTTPProxy.auth_token
+ {{- with .Values.hub.extraEnv }}
+ {{- include "jupyterhub.extraEnv" . | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: http
+ containerPort: 8081
+ {{- if .Values.hub.livenessProbe.enabled }}
+ {{- /* NOTE:
+ We don't know how long hub database upgrades could take so having a
+ liveness probe could be a bit risky unless we put an
+ initialDelaySeconds value with long enough margin for that to not be
+ an issue. If it is too short, we could end up aborting database
+ upgrades midway or ending up in an infinite restart loop.
+ */}}
+ livenessProbe:
+ initialDelaySeconds: {{ .Values.hub.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.hub.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.hub.livenessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.hub.livenessProbe.failureThreshold }}
+ httpGet:
+ path: {{ .Values.hub.baseUrl | trimSuffix "/" }}/hub/health
+ port: http
+ {{- end }}
+ {{- if .Values.hub.readinessProbe.enabled }}
+ readinessProbe:
+ initialDelaySeconds: {{ .Values.hub.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.hub.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.hub.readinessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.hub.readinessProbe.failureThreshold }}
+ httpGet:
+ path: {{ .Values.hub.baseUrl | trimSuffix "/" }}/hub/health
+ port: http
+ {{- end }}
+ {{- with .Values.hub.extraPodSpec }}
+ {{- . | toYaml | nindent 6 }}
+ {{- end }}
diff --git a/jupyterhub/templates/hub/netpol.yaml b/jupyterhub/templates/hub/netpol.yaml
new file mode 100644
index 0000000..904b2c3
--- /dev/null
+++ b/jupyterhub/templates/hub/netpol.yaml
@@ -0,0 +1,84 @@
+{{- if .Values.hub.networkPolicy.enabled -}}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: {{ include "jupyterhub.hub.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ podSelector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ policyTypes:
+ - Ingress
+ - Egress
+
+ # IMPORTANT:
+ # NetworkPolicy's ingress "from" and egress "to" rule specifications require
+ # great attention to detail. A quick summary is:
+ #
+ # 1. You can provide "from"/"to" rules that provide access either to all
+ # ports or a subset of ports.
+ # 2. You can for each "from"/"to" rule provide any number of
+ # "sources"/"destinations" of four different kinds.
+ # - podSelector - targets pods with a certain label in the same namespace as the NetworkPolicy
+ # - namespaceSelector - targets all pods running in namespaces with a certain label
+ # - namespaceSelector and podSelector - targets pods with a certain label running in namespaces with a certain label
+ # - ipBlock - targets network traffic from/to a set of IP address ranges
+ #
+ # Read more at: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
+ #
+ ingress:
+ {{- with .Values.hub.networkPolicy.allowedIngressPorts }}
+ # allow incoming traffic to these ports independent of source
+ - ports:
+ {{- range $port := . }}
+ - port: {{ $port }}
+ {{- end }}
+ {{- end }}
+
+ # allowed pods (hub.jupyter.org/network-access-hub) --> hub
+ - ports:
+ - port: http
+ from:
+ # source 1 - labeled pods
+ - podSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-hub: "true"
+ {{- if eq .Values.hub.networkPolicy.interNamespaceAccessLabels "accept" }}
+ namespaceSelector:
+ matchLabels: {} # without this, the podSelector would only consider pods in the local namespace
+ # source 2 - pods in labeled namespaces
+ - namespaceSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-hub: "true"
+ {{- end }}
+
+ {{- with .Values.hub.networkPolicy.ingress }}
+ # depends, but default is nothing --> hub
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+
+ egress:
+ # hub --> proxy
+ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "proxy") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8001
+
+ # hub --> singleuser-server
+ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8888
+
+ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.hub.networkPolicy)) }}
+ {{- . | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/hub/pdb.yaml b/jupyterhub/templates/hub/pdb.yaml
new file mode 100644
index 0000000..3a22e39
--- /dev/null
+++ b/jupyterhub/templates/hub/pdb.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.hub.pdb.enabled -}}
+{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}}
+{{- /* k8s 1.21+ required */ -}}
+apiVersion: policy/v1
+{{- else }}
+apiVersion: policy/v1beta1
+{{- end }}
+kind: PodDisruptionBudget
+metadata:
+ name: {{ include "jupyterhub.hub.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ {{- if not (typeIs "" .Values.hub.pdb.maxUnavailable) }}
+ maxUnavailable: {{ .Values.hub.pdb.maxUnavailable }}
+ {{- end }}
+ {{- if not (typeIs "" .Values.hub.pdb.minAvailable) }}
+ minAvailable: {{ .Values.hub.pdb.minAvailable }}
+ {{- end }}
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+{{- end }}
diff --git a/jupyterhub/templates/hub/pvc.yaml b/jupyterhub/templates/hub/pvc.yaml
new file mode 100644
index 0000000..a433a97
--- /dev/null
+++ b/jupyterhub/templates/hub/pvc.yaml
@@ -0,0 +1,25 @@
+{{- if eq .Values.hub.db.type "sqlite-pvc" -}}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: {{ include "jupyterhub.hub-pvc.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ {{- with .Values.hub.db.pvc.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+spec:
+ {{- with .Values.hub.db.pvc.selector }}
+ selector:
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+ {{- if typeIs "string" .Values.hub.db.pvc.storageClassName }}
+ storageClassName: {{ .Values.hub.db.pvc.storageClassName | quote }}
+ {{- end }}
+ accessModes:
+ {{- .Values.hub.db.pvc.accessModes | toYaml | nindent 4 }}
+ resources:
+ requests:
+ storage: {{ .Values.hub.db.pvc.storage | quote }}
+{{- end }}
diff --git a/jupyterhub/templates/hub/rbac.yaml b/jupyterhub/templates/hub/rbac.yaml
new file mode 100644
index 0000000..3abf00e
--- /dev/null
+++ b/jupyterhub/templates/hub/rbac.yaml
@@ -0,0 +1,30 @@
+{{- if .Values.rbac.create -}}
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "jupyterhub.hub.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+rules:
+ - apiGroups: [""] # "" indicates the core API group
+ resources: ["pods", "persistentvolumeclaims", "secrets", "services"]
+ verbs: ["get", "watch", "list", "create", "delete"]
+ - apiGroups: [""] # "" indicates the core API group
+ resources: ["events"]
+ verbs: ["get", "watch", "list"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "jupyterhub.hub.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }}
+ namespace: "{{ .Release.Namespace }}"
+roleRef:
+ kind: Role
+ name: {{ include "jupyterhub.hub.fullname" . }}
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
diff --git a/jupyterhub/templates/hub/secret.yaml b/jupyterhub/templates/hub/secret.yaml
new file mode 100644
index 0000000..851bda0
--- /dev/null
+++ b/jupyterhub/templates/hub/secret.yaml
@@ -0,0 +1,50 @@
+kind: Secret
+apiVersion: v1
+metadata:
+ name: {{ include "jupyterhub.hub.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+type: Opaque
+data:
+ {{- $values := merge dict .Values }}
+ {{- /* also passthrough subset of Chart / Release */}}
+ {{- $_ := set $values "Chart" (dict "Name" .Chart.Name "Version" .Chart.Version) }}
+ {{- $_ := set $values "Release" (pick .Release "Name" "Namespace" "Service") }}
+ values.yaml: {{ $values | toYaml | b64enc | quote }}
+
+ {{- with .Values.hub.db.password }}
+ # Used to mount MYSQL_PWD or PGPASSWORD on hub pod, unless hub.existingSecret
+ # is set as then that k8s Secret's value must be specified instead.
+ hub.db.password: {{ . | b64enc | quote }}
+ {{- end }}
+
+ # Any JupyterHub Services api_tokens are exposed in this k8s Secret as a
+  # convenience for external services running in the k8s cluster that could
+ # mount them directly from this k8s Secret.
+ {{- range $key, $service := .Values.hub.services }}
+ hub.services.{{ $key }}.apiToken: {{ include "jupyterhub.hub.services.get_api_token" (list $ $key) | b64enc | quote }}
+ {{- end }}
+
+ # During Helm template rendering, these values that can be autogenerated for
+ # users are set using the following logic:
+ #
+ # 1. Use chart configuration's value
+ # 2. Use k8s Secret's value
+ # 3. Use a new autogenerated value
+ #
+ # hub.config.ConfigurableHTTPProxy.auth_token: for hub to proxy-api authorization (JupyterHub.proxy_auth_token is deprecated)
+ # hub.config.JupyterHub.cookie_secret: for cookie encryption
+ # hub.config.CryptKeeper.keys: for auth state encryption
+ #
+ hub.config.ConfigurableHTTPProxy.auth_token: {{ include "jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token" . | required "This should not happen: blank output from 'jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token' template" | b64enc | quote }}
+ hub.config.JupyterHub.cookie_secret: {{ include "jupyterhub.hub.config.JupyterHub.cookie_secret" . | required "This should not happen: blank output from 'jupyterhub.hub.config.JupyterHub.cookie_secret' template" | b64enc | quote }}
+ hub.config.CryptKeeper.keys: {{ include "jupyterhub.hub.config.CryptKeeper.keys" . | required "This should not happen: blank output from 'jupyterhub.hub.config.CryptKeeper.keys' template" | b64enc | quote }}
+
+ {{- with include "jupyterhub.extraFiles.data" .Values.hub.extraFiles }}
+ {{- . | nindent 2 }}
+ {{- end }}
+
+{{- with include "jupyterhub.extraFiles.stringData" .Values.hub.extraFiles }}
+stringData:
+ {{- . | nindent 2 }}
+{{- end }}
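+
+{{- /*
+  Sketch only, not part of this diff: the referenced helpers such as
+  "jupyterhub.hub.config.JupyterHub.cookie_secret" are assumed to implement
+  the 1-2-3 fallback described above, roughly along these lines (names and
+  keys are hypothetical):
+
+  {{- define "example.autogeneratedValue" -}}
+      {{- if .Values.some.config }}
+          {{- .Values.some.config }}
+      {{- else }}
+          {{- $secret := lookup "v1" "Secret" .Release.Namespace "hub" | default dict }}
+          {{- if hasKey ($secret.data | default dict) "some.key" }}
+              {{- index $secret.data "some.key" | b64dec }}
+          {{- else }}
+              {{- randAlphaNum 64 }}
+          {{- end }}
+      {{- end }}
+  {{- end }}
+*/}}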
diff --git a/jupyterhub/templates/hub/service.yaml b/jupyterhub/templates/hub/service.yaml
new file mode 100644
index 0000000..13f80b5
--- /dev/null
+++ b/jupyterhub/templates/hub/service.yaml
@@ -0,0 +1,37 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "jupyterhub.hub.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ annotations:
+ {{- if not (index .Values.hub.service.annotations "prometheus.io/scrape") }}
+ prometheus.io/scrape: "true"
+ {{- end }}
+ {{- if not (index .Values.hub.service.annotations "prometheus.io/path") }}
+ prometheus.io/path: {{ .Values.hub.baseUrl | trimSuffix "/" }}/hub/metrics
+ {{- end }}
+ {{- if not (index .Values.hub.service.annotations "prometheus.io/port") }}
+ prometheus.io/port: "8081"
+ {{- end }}
+ {{- with .Values.hub.service.annotations }}
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+spec:
+ type: {{ .Values.hub.service.type }}
+ {{- with .Values.hub.service.loadBalancerIP }}
+ loadBalancerIP: {{ . }}
+ {{- end }}
+ selector:
+ {{- include "jupyterhub.matchLabels" . | nindent 4 }}
+ ports:
+ - name: hub
+ port: 8081
+ targetPort: http
+ {{- with .Values.hub.service.ports.nodePort }}
+ nodePort: {{ . }}
+ {{- end }}
+
+ {{- with .Values.hub.service.extraPorts }}
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
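+
+{{- /*
+  Illustration only: since each prometheus.io/* default above is skipped as
+  soon as the same key appears in hub.service.annotations, a hypothetical
+  values.yaml snippet like this disables scraping without emitting a
+  duplicate key:
+
+  hub:
+    service:
+      annotations:
+        prometheus.io/scrape: "false"
+*/}}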
diff --git a/jupyterhub/templates/hub/serviceaccount.yaml b/jupyterhub/templates/hub/serviceaccount.yaml
new file mode 100644
index 0000000..06a5069
--- /dev/null
+++ b/jupyterhub/templates/hub/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.hub.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }}
+ {{- with .Values.hub.serviceAccount.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+{{- end }}
diff --git a/jupyterhub/templates/image-pull-secret.yaml b/jupyterhub/templates/image-pull-secret.yaml
new file mode 100644
index 0000000..e033ec6
--- /dev/null
+++ b/jupyterhub/templates/image-pull-secret.yaml
@@ -0,0 +1,15 @@
+{{- if .Values.imagePullSecret.create }}
+kind: Secret
+apiVersion: v1
+metadata:
+ name: {{ include "jupyterhub.image-pull-secret.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation
+ "helm.sh/hook-weight": "-20"
+type: kubernetes.io/dockerconfigjson
+data:
+ .dockerconfigjson: {{ include "jupyterhub.dockerconfigjson" . }}
+{{- end }}
diff --git a/jupyterhub/templates/image-puller/_helpers-daemonset.tpl b/jupyterhub/templates/image-puller/_helpers-daemonset.tpl
new file mode 100644
index 0000000..1fe8276
--- /dev/null
+++ b/jupyterhub/templates/image-puller/_helpers-daemonset.tpl
@@ -0,0 +1,251 @@
+{{- /*
+Returns an image-puller daemonset. Two daemonsets are created from this template:
+- hook-image-puller: pulls images before a helm upgrade (lives temporarily)
+- continuous-image-puller: pulls images to newly added nodes
+*/}}
+{{- define "jupyterhub.imagePuller.daemonset" -}}
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ {{- if .hook }}
+ name: {{ include "jupyterhub.hook-image-puller.fullname" . }}
+ {{- else }}
+ name: {{ include "jupyterhub.continuous-image-puller.fullname" . }}
+ {{- end }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ {{- if .hook }}
+ hub.jupyter.org/deletable: "true"
+ {{- end }}
+ {{- if .hook }}
+ annotations:
+ {{- /*
+ Allows the daemonset to be deleted when the image-awaiter job is completed.
+ */}}
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ "helm.sh/hook-weight": "-10"
+ {{- end }}
+spec:
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 100%
+ {{- if typeIs "int" .Values.prePuller.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.prePuller.revisionHistoryLimit }}
+ {{- end }}
+ template:
+ metadata:
+ labels:
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ {{- with .Values.prePuller.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- /*
+      image-puller pods are made evictable to save on the per-node pod limit
+      that all k8s clusters have, and they get a higher priority than
+      user-placeholder pods that could otherwise block an entire node.
+ */}}
+ {{- if .Values.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.image-puller-priority.fullname" . }}
+ {{- end }}
+ {{- with .Values.singleuser.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- with concat .Values.scheduling.userPods.tolerations .Values.singleuser.extraTolerations .Values.prePuller.extraTolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- if include "jupyterhub.userNodeAffinityRequired" . }}
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ {{- include "jupyterhub.userNodeAffinityRequired" . | nindent 14 }}
+ {{- end }}
+ terminationGracePeriodSeconds: 0
+ automountServiceAccountToken: false
+ {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.singleuser.image) }}
+ imagePullSecrets: {{ . }}
+ {{- end }}
+ initContainers:
+ {{- /* --- Conditionally pull an image all user pods will use in an initContainer --- */}}
+ {{- $blockWithIptables := hasKey .Values.singleuser.cloudMetadata "enabled" | ternary (not .Values.singleuser.cloudMetadata.enabled) .Values.singleuser.cloudMetadata.blockWithIptables }}
+ {{- if $blockWithIptables }}
+ - name: image-pull-metadata-block
+ image: {{ .Values.singleuser.networkTools.image.name }}:{{ .Values.singleuser.networkTools.image.tag }}
+ {{- with .Values.singleuser.networkTools.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ command:
+ - /bin/sh
+ - -c
+ - echo "Pulling complete"
+ {{- with .Values.prePuller.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with .Values.prePuller.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- end }}
+
+ {{- /* --- Pull default image --- */}}
+      - name: image-pull-singleuser
+        image: {{ .Values.singleuser.image.name }}:{{ .Values.singleuser.image.tag }}
+        {{- with .Values.singleuser.image.pullPolicy }}
+        imagePullPolicy: {{ . }}
+        {{- end }}
+ command:
+ - /bin/sh
+ - -c
+ - echo "Pulling complete"
+ {{- with .Values.prePuller.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with .Values.prePuller.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+
+ {{- /* --- Pull extra containers' images --- */}}
+ {{- range $k, $container := concat .Values.singleuser.initContainers .Values.singleuser.extraContainers }}
+ - name: image-pull-singleuser-init-and-extra-containers-{{ $k }}
+ image: {{ $container.image }}
+ command:
+ - /bin/sh
+ - -c
+ - echo "Pulling complete"
+ {{- with $.Values.prePuller.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with $.Values.prePuller.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- end }}
+
+ {{- /* --- Conditionally pull profileList images --- */}}
+ {{- if .Values.prePuller.pullProfileListImages }}
+ {{- range $k, $container := .Values.singleuser.profileList }}
+ {{- if $container.kubespawner_override }}
+ {{- if $container.kubespawner_override.image }}
+ - name: image-pull-singleuser-profilelist-{{ $k }}
+ image: {{ $container.kubespawner_override.image }}
+ command:
+ - /bin/sh
+ - -c
+ - echo "Pulling complete"
+ {{- with $.Values.prePuller.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with $.Values.prePuller.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+
+ {{- /* --- Pull extra images --- */}}
+ {{- range $k, $v := .Values.prePuller.extraImages }}
+ - name: image-pull-{{ $k }}
+ image: {{ $v.name }}:{{ $v.tag }}
+ command:
+ - /bin/sh
+ - -c
+ - echo "Pulling complete"
+ {{- with $.Values.prePuller.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with $.Values.prePuller.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: pause
+ image: {{ .Values.prePuller.pause.image.name }}:{{ .Values.prePuller.pause.image.tag }}
+ {{- with .Values.prePuller.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with .Values.prePuller.pause.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+{{- end }}
+
+
+{{- /*
+ Returns a rendered k8s DaemonSet resource: continuous-image-puller
+*/}}
+{{- define "jupyterhub.imagePuller.daemonset.continuous" -}}
+ {{- $_ := merge (dict "hook" false "componentPrefix" "continuous-") . }}
+ {{- include "jupyterhub.imagePuller.daemonset" $_ }}
+{{- end }}
+
+
+{{- /*
+ Returns a rendered k8s DaemonSet resource: hook-image-puller
+*/}}
+{{- define "jupyterhub.imagePuller.daemonset.hook" -}}
+ {{- $_ := merge (dict "hook" true "componentPrefix" "hook-") . }}
+ {{- include "jupyterhub.imagePuller.daemonset" $_ }}
+{{- end }}
+
+
+{{- /*
+ Returns a checksum of the rendered k8s DaemonSet resource: hook-image-puller
+
+ This checksum is used when prePuller.hook.pullOnlyOnChanges=true to decide if
+ it is worth creating the hook-image-puller associated resources.
+*/}}
+{{- define "jupyterhub.imagePuller.daemonset.hook.checksum" -}}
+ {{- /*
+    We pin componentLabel and Chart.Version so that labels of no importance
+    to the pulled images cannot change the checksum. Chart.Name is pinned as
+    well, as a harmless technical workaround needed to compute the checksum.
+ */}}
+ {{- $_ := merge (dict "componentLabel" "pinned" "Chart" (dict "Name" "jupyterhub" "Version" "pinned")) . -}}
+ {{- $yaml := include "jupyterhub.imagePuller.daemonset.hook" $_ }}
+ {{- $yaml | sha256sum }}
+{{- end }}
+
+
+{{- /*
+ Returns a truthy string or a blank string depending on if the
+ hook-image-puller should be installed. The truthy strings are comments
+ that summarize the state that led to returning a truthy string.
+
+ - prePuller.hook.enabled must be true
+ - if prePuller.hook.pullOnlyOnChanges is true, the checksum of the
+ hook-image-puller daemonset must differ since last upgrade
+*/}}
+{{- define "jupyterhub.imagePuller.daemonset.hook.install" -}}
+ {{- if .Values.prePuller.hook.enabled }}
+ {{- if .Values.prePuller.hook.pullOnlyOnChanges }}
+ {{- $new_checksum := include "jupyterhub.imagePuller.daemonset.hook.checksum" . }}
+ {{- $k8s_state := lookup "v1" "ConfigMap" .Release.Namespace (include "jupyterhub.hub.fullname" .) | default (dict "data" (dict)) }}
+ {{- $old_checksum := index $k8s_state.data "checksum_hook-image-puller" | default "" }}
+ {{- if ne $new_checksum $old_checksum -}}
+# prePuller.hook.enabled={{ .Values.prePuller.hook.enabled }}
+# prePuller.hook.pullOnlyOnChanges={{ .Values.prePuller.hook.pullOnlyOnChanges }}
+# post-upgrade checksum != pre-upgrade checksum (of the hook-image-puller DaemonSet)
+# "{{ $new_checksum }}" != "{{ $old_checksum}}"
+ {{- end }}
+ {{- else -}}
+# prePuller.hook.enabled={{ .Values.prePuller.hook.enabled }}
+# prePuller.hook.pullOnlyOnChanges={{ .Values.prePuller.hook.pullOnlyOnChanges }}
+ {{- end }}
+ {{- end }}
+{{- end }}
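+
+
+{{- /*
+  Illustration only: a hypothetical values.yaml snippet exercising the
+  checksum gate above. On helm upgrade, a fresh sha256 of the rendered
+  hook-image-puller DaemonSet is compared with the one stored under
+  data.checksum_hook-image-puller in the hub ConfigMap of the previous
+  release; the hook resources are only created when the two differ:
+
+  prePuller:
+    hook:
+      enabled: true
+      pullOnlyOnChanges: true
+*/}}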
diff --git a/jupyterhub/templates/image-puller/daemonset-continuous.yaml b/jupyterhub/templates/image-puller/daemonset-continuous.yaml
new file mode 100644
index 0000000..85a572f
--- /dev/null
+++ b/jupyterhub/templates/image-puller/daemonset-continuous.yaml
@@ -0,0 +1,8 @@
+{{- /*
+The continuous-image-puller daemonset's task is to pull required images to nodes
+that are added in between helm upgrades, for example by manually adding a node
+or by the cluster autoscaler.
+*/}}
+{{- if .Values.prePuller.continuous.enabled }}
+{{- include "jupyterhub.imagePuller.daemonset.continuous" . }}
+{{- end }}
diff --git a/jupyterhub/templates/image-puller/daemonset-hook.yaml b/jupyterhub/templates/image-puller/daemonset-hook.yaml
new file mode 100644
index 0000000..7e9c2d0
--- /dev/null
+++ b/jupyterhub/templates/image-puller/daemonset-hook.yaml
@@ -0,0 +1,9 @@
+{{- /*
+The hook-image-puller daemonset will be created with the highest priority during
+helm upgrades. Its task is to pull the required images on all nodes. When the
+image-awaiter job confirms that the required images have been pulled, the
+daemonset is deleted. Only then will the actual helm upgrade start.
+*/}}
+{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
+{{- include "jupyterhub.imagePuller.daemonset.hook" . }}
+{{- end }}
diff --git a/jupyterhub/templates/image-puller/job.yaml b/jupyterhub/templates/image-puller/job.yaml
new file mode 100644
index 0000000..5509f13
--- /dev/null
+++ b/jupyterhub/templates/image-puller/job.yaml
@@ -0,0 +1,76 @@
+{{- /*
+This job plays a part in the helm upgrade process. It simply waits for the
+hook-image-puller daemonset, which is started slightly before this job, to get
+its pods running. If all those pods are running, they must have pulled all the
+required images on all nodes, since the images are used by init containers
+with a dummy command.
+*/}}
+{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ hub.jupyter.org/deletable: "true"
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ "helm.sh/hook-weight": "10"
+spec:
+ template:
+    # The hook-image-awaiter Job and hook-image-puller DaemonSet were
+ # conditionally created based on this state:
+ #
+ {{- include "jupyterhub.imagePuller.daemonset.hook.install" . | nindent 4 }}
+ #
+ metadata:
+ labels:
+ {{- /* Changes here will cause the Job to restart the pods. */}}
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ {{- with .Values.prePuller.labels }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- with .Values.prePuller.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ spec:
+ restartPolicy: Never
+ {{- with include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
+ serviceAccountName: {{ . }}
+ {{- end }}
+ {{- with .Values.prePuller.hook.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- with concat .Values.scheduling.corePods.tolerations .Values.prePuller.hook.tolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.prePuller.hook.image) }}
+ imagePullSecrets: {{ . }}
+ {{- end }}
+ containers:
+ - image: {{ .Values.prePuller.hook.image.name }}:{{ .Values.prePuller.hook.image.tag }}
+ name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
+ {{- with .Values.prePuller.hook.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ command:
+ - /image-awaiter
+ - -ca-path=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ - -auth-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -api-server-address=https://kubernetes.default.svc:$(KUBERNETES_SERVICE_PORT)
+ - -namespace={{ .Release.Namespace }}
+ - -daemonset={{ include "jupyterhub.hook-image-puller.fullname" . }}
+ - -pod-scheduling-wait-duration={{ .Values.prePuller.hook.podSchedulingWaitDuration }}
+ {{- with .Values.prePuller.hook.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with .Values.prePuller.hook.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/image-puller/priorityclass.yaml b/jupyterhub/templates/image-puller/priorityclass.yaml
new file mode 100644
index 0000000..b2dbae0
--- /dev/null
+++ b/jupyterhub/templates/image-puller/priorityclass.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.scheduling.podPriority.enabled }}
+{{- if or .Values.prePuller.hook.enabled .Values.prePuller.continuous.enabled -}}
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+ name: {{ include "jupyterhub.image-puller-priority.fullname" . }}
+ annotations:
+ meta.helm.sh/release-name: "{{ .Release.Name }}"
+ meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+value: {{ .Values.scheduling.podPriority.imagePullerPriority }}
+globalDefault: false
+description: >-
+ Enables [hook|continuous]-image-puller pods to fit on nodes even though they
+ are clogged by user-placeholder pods, while not evicting normal user pods.
+{{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/image-puller/rbac.yaml b/jupyterhub/templates/image-puller/rbac.yaml
new file mode 100644
index 0000000..996a59a
--- /dev/null
+++ b/jupyterhub/templates/image-puller/rbac.yaml
@@ -0,0 +1,45 @@
+{{- /*
+Permissions to be used by the hook-image-awaiter job
+*/}}
+{{- if .Values.rbac.create -}}
+{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ hub.jupyter.org/deletable: "true"
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ "helm.sh/hook-weight": "0"
+rules:
+ - apiGroups: ["apps"] # "" indicates the core API group
+ resources: ["daemonsets"]
+ verbs: ["get"]
+---
+{{- /*
+The Role above is granted to the hook-image-awaiter's ServiceAccount, as
+declared by this binding.
+*/}}
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ hub.jupyter.org/deletable: "true"
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ "helm.sh/hook-weight": "0"
+subjects:
+ - kind: ServiceAccount
+ name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
+ namespace: "{{ .Release.Namespace }}"
+roleRef:
+ kind: Role
+ name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/image-puller/serviceaccount.yaml b/jupyterhub/templates/image-puller/serviceaccount.yaml
new file mode 100644
index 0000000..8161101
--- /dev/null
+++ b/jupyterhub/templates/image-puller/serviceaccount.yaml
@@ -0,0 +1,21 @@
+{{- /*
+ServiceAccount for the pre-puller hook's image-awaiter-job
+*/}}
+{{- if .Values.prePuller.hook.serviceAccount.create -}}
+{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ hub.jupyter.org/deletable: "true"
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+ "helm.sh/hook-weight": "0"
+ {{- with .Values.prePuller.hook.serviceAccount.annotations }}
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/ingress.yaml b/jupyterhub/templates/ingress.yaml
new file mode 100644
index 0000000..91f96f4
--- /dev/null
+++ b/jupyterhub/templates/ingress.yaml
@@ -0,0 +1,35 @@
+{{- if .Values.ingress.enabled -}}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{ include "jupyterhub.ingress.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ {{- with .Values.ingress.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+spec:
+ {{- with .Values.ingress.ingressClassName }}
+ ingressClassName: "{{ . }}"
+ {{- end }}
+ rules:
+ {{- range $host := .Values.ingress.hosts | default (list "") }}
+ - http:
+ paths:
+ - path: {{ $.Values.hub.baseUrl | trimSuffix "/" }}/{{ $.Values.ingress.pathSuffix }}
+ pathType: {{ $.Values.ingress.pathType }}
+ backend:
+ service:
+ name: {{ include "jupyterhub.proxy-public.fullname" $ }}
+ port:
+ name: http
+ {{- if $host }}
+ host: {{ $host | quote }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.ingress.tls }}
+ tls:
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/proxy/autohttps/_README.txt b/jupyterhub/templates/proxy/autohttps/_README.txt
new file mode 100644
index 0000000..eaf1c5c
--- /dev/null
+++ b/jupyterhub/templates/proxy/autohttps/_README.txt
@@ -0,0 +1,9 @@
+# Automatic HTTPS Terminator
+
+This directory has Kubernetes objects for automatic Let's Encrypt support.
+When enabled, we create a new deployment object that runs a Traefik container
+alongside a secret-sync container. Together they are responsible for
+requesting, storing and renewing certificates as needed from Let's Encrypt.
+
+The only change required outside of this directory is in the `proxy-public`
+service, which targets a different component based on automatic HTTPS status.
diff --git a/jupyterhub/templates/proxy/autohttps/_configmap-dynamic.yaml b/jupyterhub/templates/proxy/autohttps/_configmap-dynamic.yaml
new file mode 100644
index 0000000..0e2a8f4
--- /dev/null
+++ b/jupyterhub/templates/proxy/autohttps/_configmap-dynamic.yaml
@@ -0,0 +1,109 @@
+{{- define "jupyterhub.dynamic.yaml" -}}
+# Content of dynamic.yaml to be merged with
+# proxy.traefik.extraDynamicConfig.
+# ----------------------------------------------------------------------------
+http:
+ # Middlewares tweaks requests. We define them here and reference them in
+ # our routers. We use them to redirect http traffic and headers to proxied
+ # web requests.
+ #
+ # ref: https://docs.traefik.io/middlewares/overview/
+ middlewares:
+ hsts:
+      # A middleware to add an HTTP Strict-Transport-Security (HSTS) response
+      # header. It functions as a request for browsers to enforce HTTPS on
+      # their end for a given time into the future, and optionally for
+      # requests to subdomains as well.
+ #
+ # ref: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security
+ headers:
+ stsIncludeSubdomains: {{ .Values.proxy.traefik.hsts.includeSubdomains }}
+ stsPreload: {{ .Values.proxy.traefik.hsts.preload }}
+ stsSeconds: {{ .Values.proxy.traefik.hsts.maxAge }}
+ # A middleware to redirect to https
+ redirect:
+ redirectScheme:
+ permanent: true
+ scheme: https
+    # A middleware to add an X-Scheme (X-Forwarded-Proto) header that
+ # JupyterHub's Tornado web-server needs if expecting to serve https
+ # traffic. Without it we would run into issues like:
+ # https://github.com/jupyterhub/jupyterhub/issues/2284
+ scheme:
+ headers:
+ customRequestHeaders:
+ # DISCUSS ME: Can we use the X-Forwarded-Proto header instead? It
+ # seems more recognized. Mozilla calls it the de-facto standard
+ # header for this purpose, and Tornado recognizes both.
+ #
+ # ref: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-Proto
+ # ref: https://www.tornadoweb.org/en/stable/httpserver.html#http-server
+ X-Scheme: https
+
+  # Routers route web requests to a service and optionally tweak them with
+ # middleware.
+ #
+ # ref: https://docs.traefik.io/routing/routers/
+ routers:
+ # Route secure https traffic to the configurable-http-proxy managed by
+ # JupyterHub.
+ default:
+ entrypoints:
+ - "https"
+ middlewares:
+ - "hsts"
+ - "scheme"
+ rule: PathPrefix(`/`)
+ service: default
+ # Use our predefined TLS options and certificate resolver, enabling
+ # this route to act as a TLS termination proxy with high security
+ # standards.
+ tls:
+ certResolver: default
+ domains:
+ {{- range $host := .Values.proxy.https.hosts }}
+ - main: {{ $host }}
+ {{- end }}
+ options: default
+
+ # Route insecure http traffic to https
+ insecure:
+ entrypoints:
+ - "http"
+ middlewares:
+ - "redirect"
+ rule: PathPrefix(`/`)
+ service: default
+
+  # Services represent the destinations we route traffic to.
+ #
+ # ref: https://docs.traefik.io/routing/services/
+ services:
+ # Represents the configurable-http-proxy (chp) server that is managed by
+ # JupyterHub to route traffic both to itself and to user pods.
+ default:
+ loadBalancer:
+ servers:
+ - url: 'http://proxy-http:8000/'
+
+# Configure TLS to give us an A+ in the ssllabs.com test
+#
+# ref: https://www.ssllabs.com/ssltest/
+tls:
+ options:
+ default:
+      # Allowed ciphers adapted from Mozilla's SSL Configuration Generator,
+      # configured for Intermediate support: it doesn't support very old
+      # systems but doesn't require very modern ones either.
+ #
+ # ref: https://ssl-config.mozilla.org/#server=traefik&version=2.1.2&config=intermediate&guideline=5.4
+ cipherSuites:
+ - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
+ - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
+ - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
+ - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
+ - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
+ minVersion: VersionTLS12
+ sniStrict: true
+{{- end }}
diff --git a/jupyterhub/templates/proxy/autohttps/_configmap-traefik.yaml b/jupyterhub/templates/proxy/autohttps/_configmap-traefik.yaml
new file mode 100644
index 0000000..7287a70
--- /dev/null
+++ b/jupyterhub/templates/proxy/autohttps/_configmap-traefik.yaml
@@ -0,0 +1,68 @@
+{{- define "jupyterhub.traefik.yaml" -}}
+# Content of traefik.yaml to be merged with
+# proxy.traefik.extraStaticConfig.
+# ----------------------------------------------------------------------------
+
+# Config of logs about web requests
+#
+# ref: https://docs.traefik.io/observability/access-logs/
+accessLog:
+ # Redact commonly sensitive headers
+ fields:
+ headers:
+ names:
+ Authorization: redacted
+ Cookie: redacted
+ Set-Cookie: redacted
+ X-Xsrftoken: redacted
+ # Only log errors
+ filters:
+ statusCodes:
+ - 500-599
+
+# Automatically acquire certificates from a Certificate
+# Authority (CA) like Let's Encrypt using the ACME protocol's HTTP-01
+# challenge.
+#
+# ref: https://docs.traefik.io/https/acme/#certificate-resolvers
+certificatesResolvers:
+ default:
+ acme:
+ caServer: {{ .Values.proxy.https.letsencrypt.acmeServer }}
+ email: {{ .Values.proxy.https.letsencrypt.contactEmail }}
+ httpChallenge:
+ entryPoint: http
+ storage: /etc/acme/acme.json
+
+# Let Traefik listen to port 80 and port 443
+#
+# ref: https://docs.traefik.io/routing/entrypoints/
+entryPoints:
+ # Port 80, used for:
+ # - ACME HTTP-01 challenges
+ # - Redirects to HTTPS
+ http:
+ address: ':8080'
+ # Port 443, used for:
+ # - TLS Termination Proxy, where HTTPS transitions to HTTP.
+ https:
+ address: ':8443'
+    # Configure a high idle timeout for our websocket connections
+ transport:
+ respondingTimeouts:
+ idleTimeout: 10m0s
+
+# Config of logs about what happens to Traefik itself (startup,
+# configuration, events, shutdown, and so on).
+#
+# ref: https://docs.traefik.io/observability/logs
+log:
+ level: {{ if .Values.debug.enabled -}} DEBUG {{- else -}} WARN {{- end }}
+
+# Let Traefik monitor another file we mount for dynamic configuration. As we
+# mount this file through this configmap, we can make a `kubectl edit` on the
+# configmap and have Traefik update on changes to dynamic.yaml.
+providers:
+ file:
+ filename: /etc/traefik/dynamic.yaml
+{{- end }}
diff --git a/jupyterhub/templates/proxy/autohttps/configmap.yaml b/jupyterhub/templates/proxy/autohttps/configmap.yaml
new file mode 100644
index 0000000..4804bf7
--- /dev/null
+++ b/jupyterhub/templates/proxy/autohttps/configmap.yaml
@@ -0,0 +1,28 @@
+{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) }}
+{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) }}
+{{- if $autoHTTPS -}}
+{{- $_ := .Values.proxy.https.letsencrypt.contactEmail | required "proxy.https.letsencrypt.contactEmail is a required field" -}}
+
+# This configmap contains Traefik configuration files to be mounted.
+# - traefik.yaml will only be read during startup (static configuration)
+# - dynamic.yaml will be read on change (dynamic configuration)
+#
+# ref: https://docs.traefik.io/getting-started/configuration-overview/
+#
+# The configuration files are first rendered with Helm templating into YAML
+# strings. Then we use the fromYaml function to parse these strings into
+# objects that we in turn merge with user-provided extra configuration.
+#
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: {{ include "jupyterhub.autohttps.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+data:
+ traefik.yaml: |
+ {{- include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.proxy.traefik.extraStaticConfig | toYaml | nindent 4 }}
+ dynamic.yaml: |
+ {{- include "jupyterhub.dynamic.yaml" . | fromYaml | merge .Values.proxy.traefik.extraDynamicConfig | toYaml | nindent 4 }}
+
+{{- end }}
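+
+{{- /*
+  Illustration only: the fromYaml | merge | toYaml pipeline above gives user
+  configuration precedence key by key. A hypothetical override such as:
+
+  proxy:
+    traefik:
+      extraStaticConfig:
+        log:
+          level: DEBUG
+
+  replaces only log.level in the rendered traefik.yaml while keeping every
+  other key from the chart's static configuration.
+*/}}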
diff --git a/jupyterhub/templates/proxy/autohttps/deployment.yaml b/jupyterhub/templates/proxy/autohttps/deployment.yaml
new file mode 100644
index 0000000..f76f3ef
--- /dev/null
+++ b/jupyterhub/templates/proxy/autohttps/deployment.yaml
@@ -0,0 +1,154 @@
+{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) }}
+{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) }}
+{{- if $autoHTTPS -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "jupyterhub.autohttps.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ {{- if typeIs "int" .Values.proxy.traefik.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.proxy.traefik.revisionHistoryLimit }}
+ {{- end }}
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ hub.jupyter.org/network-access-proxy-http: "true"
+ {{- with .Values.proxy.traefik.labels }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ annotations:
+        # Only force a restart through a change to this checksum when the static
+        # configuration is changed, as the dynamic configuration can be updated
+        # after start. Any disruption to this deployment impacts everything; it
+        # is the entrypoint for all network traffic.
+ checksum/static-config: {{ include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.proxy.traefik.extraStaticConfig | toYaml | sha256sum }}
+ spec:
+ {{- with include "jupyterhub.autohttps-serviceaccount.fullname" . }}
+ serviceAccountName: {{ . }}
+ {{- end }}
+ {{- if .Values.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
+ {{- end }}
+ {{- with .Values.proxy.traefik.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- with concat .Values.scheduling.corePods.tolerations .Values.proxy.traefik.tolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
+ volumes:
+ - name: certificates
+ emptyDir: {}
+ - name: traefik-config
+ configMap:
+ name: {{ include "jupyterhub.autohttps.fullname" . }}
+ {{- with .Values.proxy.traefik.extraVolumes }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.proxy.traefik.image) }}
+ imagePullSecrets: {{ . }}
+ {{- end }}
+ initContainers:
+ - name: load-acme
+ image: "{{ .Values.proxy.secretSync.image.name }}:{{ .Values.proxy.secretSync.image.tag }}"
+ {{- with .Values.proxy.secretSync.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ args:
+ - load
+ - {{ include "jupyterhub.proxy-public-tls.fullname" . }}
+ - acme.json
+ - /etc/acme/acme.json
+ env:
+ # We need this to get logs immediately
+ - name: PYTHONUNBUFFERED
+ value: "True"
+ {{- with .Values.proxy.traefik.extraEnv }}
+ {{- include "jupyterhub.extraEnv" . | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: certificates
+ mountPath: /etc/acme
+ {{- with .Values.proxy.secretSync.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with .Values.proxy.traefik.extraInitContainers }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: traefik
+ image: "{{ .Values.proxy.traefik.image.name }}:{{ .Values.proxy.traefik.image.tag }}"
+ {{- with .Values.proxy.traefik.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ {{- with .Values.proxy.traefik.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: http
+ containerPort: 8080
+ - name: https
+ containerPort: 8443
+ {{- with .Values.proxy.traefik.extraPorts }}
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: traefik-config
+ mountPath: /etc/traefik
+ - name: certificates
+ mountPath: /etc/acme
+ {{- with .Values.proxy.traefik.extraVolumeMounts }}
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with .Values.proxy.traefik.extraEnv }}
+ env:
+ {{- include "jupyterhub.extraEnv" . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.proxy.traefik.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ - name: secret-sync
+ image: "{{ .Values.proxy.secretSync.image.name }}:{{ .Values.proxy.secretSync.image.tag }}"
+ {{- with .Values.proxy.secretSync.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ {{- with .Values.proxy.secretSync.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ args:
+ - watch-save
+ - --label=app={{ include "jupyterhub.appLabel" . }}
+ - --label=release={{ .Release.Name }}
+ - --label=chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+ - --label=heritage=secret-sync
+ - {{ include "jupyterhub.proxy-public-tls.fullname" . }}
+ - acme.json
+ - /etc/acme/acme.json
+ env:
+ # We need this to get logs immediately
+ - name: PYTHONUNBUFFERED
+ value: "True"
+ volumeMounts:
+ - name: certificates
+ mountPath: /etc/acme
+ {{- with .Values.proxy.secretSync.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with .Values.proxy.traefik.extraPodSpec }}
+ {{- . | toYaml | nindent 6 }}
+ {{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/proxy/autohttps/netpol.yaml b/jupyterhub/templates/proxy/autohttps/netpol.yaml
new file mode 100644
index 0000000..78270b6
--- /dev/null
+++ b/jupyterhub/templates/proxy/autohttps/netpol.yaml
@@ -0,0 +1,78 @@
+{{- $HTTPS := .Values.proxy.https.enabled -}}
+{{- $autoHTTPS := and $HTTPS (and (eq .Values.proxy.https.type "letsencrypt") .Values.proxy.https.hosts) -}}
+{{- if and $autoHTTPS .Values.proxy.traefik.networkPolicy.enabled -}}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: {{ include "jupyterhub.autohttps.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ podSelector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ policyTypes:
+ - Ingress
+ - Egress
+
+ # IMPORTANT:
+ # NetworkPolicy's ingress "from" and egress "to" rule specifications require
+ # great attention to detail. A quick summary is:
+ #
+ # 1. You can provide "from"/"to" rules that provide access either ports or a
+ # subset of ports.
+ # 2. You can for each "from"/"to" rule provide any number of
+ # "sources"/"destinations" of four different kinds.
+ # - podSelector - targets pods with a certain label in the same namespace as the NetworkPolicy
+ # - namespaceSelector - targets all pods running in namespaces with a certain label
+ # - namespaceSelector and podSelector - targets pods with a certain label running in namespaces with a certain label
+ # - ipBlock - targets network traffic from/to a set of IP address ranges
+ #
+ # Read more at: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
+ #
+ ingress:
+ {{- with .Values.proxy.traefik.networkPolicy.allowedIngressPorts }}
+ # allow incoming traffic to these ports independent of source
+ - ports:
+ {{- range $port := . }}
+ - port: {{ $port }}
+ {{- end }}
+ {{- end }}
+
+ # allowed pods (hub.jupyter.org/network-access-proxy-http) --> proxy (http/https port)
+ - ports:
+ - port: http
+ - port: https
+ from:
+ # source 1 - labeled pods
+ - podSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-proxy-http: "true"
+ {{- if eq .Values.proxy.traefik.networkPolicy.interNamespaceAccessLabels "accept" }}
+ namespaceSelector:
+ matchLabels: {} # without this, the podSelector would only consider pods in the local namespace
+ # source 2 - pods in labeled namespaces
+ - namespaceSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-proxy-http: "true"
+ {{- end }}
+
+  {{- with .Values.proxy.traefik.networkPolicy.ingress }}
+ # depends, but default is nothing --> proxy
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+
+ egress:
+ # autohttps --> proxy (http port)
+ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "proxy") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8000
+
+ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.proxy.traefik.networkPolicy)) }}
+ {{- . | nindent 4 }}
+ {{- end }}
+{{- end }}
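+
+{{- /*
+  Illustration only: when proxy.traefik.networkPolicy.interNamespaceAccessLabels
+  is set to "accept", the ingress rule above also admits traffic from pods in
+  any namespace carrying the access label, e.g. a namespace manifest like:
+
+  apiVersion: v1
+  kind: Namespace
+  metadata:
+    name: example
+    labels:
+      hub.jupyter.org/network-access-proxy-http: "true"
+*/}}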
diff --git a/jupyterhub/templates/proxy/autohttps/pdb.yaml b/jupyterhub/templates/proxy/autohttps/pdb.yaml
new file mode 100644
index 0000000..074d0ac
--- /dev/null
+++ b/jupyterhub/templates/proxy/autohttps/pdb.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.proxy.traefik.pdb.enabled -}}
+{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}}
+{{- /* k8s 1.21+ required */ -}}
+apiVersion: policy/v1
+{{- else }}
+apiVersion: policy/v1beta1
+{{- end }}
+kind: PodDisruptionBudget
+metadata:
+  name: {{ include "jupyterhub.autohttps.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ {{- if not (typeIs "" .Values.proxy.traefik.pdb.maxUnavailable) }}
+ maxUnavailable: {{ .Values.proxy.traefik.pdb.maxUnavailable }}
+ {{- end }}
+ {{- if not (typeIs "" .Values.proxy.traefik.pdb.minAvailable) }}
+ minAvailable: {{ .Values.proxy.traefik.pdb.minAvailable }}
+ {{- end }}
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+{{- end }}
diff --git a/jupyterhub/templates/proxy/autohttps/rbac.yaml b/jupyterhub/templates/proxy/autohttps/rbac.yaml
new file mode 100644
index 0000000..a0fd41a
--- /dev/null
+++ b/jupyterhub/templates/proxy/autohttps/rbac.yaml
@@ -0,0 +1,35 @@
+{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) -}}
+{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) -}}
+{{- if $autoHTTPS -}}
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ include "jupyterhub.autohttps.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ {{- with .Values.proxy.traefik.serviceAccount.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+rules:
+- apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "patch", "list", "create"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ include "jupyterhub.autohttps.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+subjects:
+- kind: ServiceAccount
+ name: {{ include "jupyterhub.autohttps-serviceaccount.fullname" . }}
+ apiGroup:
+roleRef:
+ kind: Role
+ name: {{ include "jupyterhub.autohttps.fullname" . }}
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/proxy/autohttps/service.yaml b/jupyterhub/templates/proxy/autohttps/service.yaml
new file mode 100644
index 0000000..615e36d
--- /dev/null
+++ b/jupyterhub/templates/proxy/autohttps/service.yaml
@@ -0,0 +1,25 @@
+{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) }}
+{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) }}
+{{- if $autoHTTPS -}}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "jupyterhub.proxy-http.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ {{- with .Values.proxy.service.labels }}
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+ {{- with .Values.proxy.service.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+spec:
+ type: ClusterIP
+ selector:
+ {{- $_ := merge (dict "componentLabel" "proxy") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 4 }}
+ ports:
+ - port: 8000
+ targetPort: http
+{{- end }}
diff --git a/jupyterhub/templates/proxy/autohttps/serviceaccount.yaml b/jupyterhub/templates/proxy/autohttps/serviceaccount.yaml
new file mode 100644
index 0000000..5b340bb
--- /dev/null
+++ b/jupyterhub/templates/proxy/autohttps/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) -}}
+{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) -}}
+{{- if $autoHTTPS -}}
+{{- if .Values.proxy.traefik.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "jupyterhub.autohttps-serviceaccount.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+{{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/proxy/deployment.yaml b/jupyterhub/templates/proxy/deployment.yaml
new file mode 100644
index 0000000..2b35382
--- /dev/null
+++ b/jupyterhub/templates/proxy/deployment.yaml
@@ -0,0 +1,178 @@
+{{- $manualHTTPS := and .Values.proxy.https.enabled (eq .Values.proxy.https.type "manual") -}}
+{{- $manualHTTPSwithsecret := and .Values.proxy.https.enabled (eq .Values.proxy.https.type "secret") -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "jupyterhub.proxy.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ {{- if typeIs "int" .Values.proxy.chp.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.proxy.chp.revisionHistoryLimit }}
+ {{- end }}
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ strategy:
+ {{- .Values.proxy.deploymentStrategy | toYaml | nindent 4 }}
+ template:
+ metadata:
+ labels:
+ {{- /* Changes here will cause the Deployment to restart the pods. */}}
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ hub.jupyter.org/network-access-hub: "true"
+ hub.jupyter.org/network-access-singleuser: "true"
+ {{- with .Values.proxy.labels }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ annotations:
+ # We want to restart proxy only if the auth token changes
+ # Other changes to the hub config should not restart.
+ # We truncate to 4 chars to avoid leaking auth token info,
+ # since someone could brute force the hash to obtain the token
+ #
+ # Note that if auth_token has to be generated at random, it will be
+ # generated at random here separately from being generated at random in
+ # the k8s Secret template. This will cause this annotation to change to
+ # match the k8s Secret during the first upgrade following an auth_token
+ # was generated.
+ checksum/auth-token: {{ include "jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token" . | sha256sum | trunc 4 | quote }}
+ checksum/proxy-secret: {{ include (print $.Template.BasePath "/proxy/secret.yaml") . | sha256sum | quote }}
+ {{- with .Values.proxy.annotations }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ spec:
+ terminationGracePeriodSeconds: 60
+ {{- if .Values.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
+ {{- end }}
+ {{- with .Values.proxy.chp.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- with concat .Values.scheduling.corePods.tolerations .Values.proxy.chp.tolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
+ {{- if $manualHTTPS }}
+ volumes:
+ - name: tls-secret
+ secret:
+ secretName: {{ include "jupyterhub.proxy-public-manual-tls.fullname" . }}
+ {{- else if $manualHTTPSwithsecret }}
+ volumes:
+ - name: tls-secret
+ secret:
+ secretName: {{ .Values.proxy.https.secret.name }}
+ {{- end }}
+ {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.proxy.chp.image) }}
+ imagePullSecrets: {{ . }}
+ {{- end }}
+ containers:
+ - name: chp
+ image: {{ .Values.proxy.chp.image.name }}:{{ .Values.proxy.chp.image.tag }}
+ {{- $hubNameAsEnv := include "jupyterhub.hub.fullname" . | upper | replace "-" "_" }}
+ {{- $hubHost := printf "http://%s:$(%s_SERVICE_PORT)" (include "jupyterhub.hub.fullname" .) $hubNameAsEnv }}
+ command:
+ - configurable-http-proxy
+ - "--ip="
+ - "--api-ip="
+ - --api-port=8001
+ - --default-target={{ .Values.proxy.chp.defaultTarget | default $hubHost }}
+ - --error-target={{ .Values.proxy.chp.errorTarget | default (printf "%s/hub/error" $hubHost) }}
+ {{- if $manualHTTPS }}
+ - --port=8443
+ - --redirect-port=8000
+ - --redirect-to=443
+ - --ssl-key=/etc/chp/tls/tls.key
+ - --ssl-cert=/etc/chp/tls/tls.crt
+ {{- else if $manualHTTPSwithsecret }}
+ - --port=8443
+ - --redirect-port=8000
+ - --redirect-to=443
+ - --ssl-key=/etc/chp/tls/{{ .Values.proxy.https.secret.key }}
+ - --ssl-cert=/etc/chp/tls/{{ .Values.proxy.https.secret.crt }}
+ {{- else }}
+ - --port=8000
+ {{- end }}
+ {{- if .Values.debug.enabled }}
+ - --log-level=debug
+ {{- end }}
+ {{- range .Values.proxy.chp.extraCommandLineFlags }}
+ - {{ tpl . $ }}
+ {{- end }}
+ {{- if or $manualHTTPS $manualHTTPSwithsecret }}
+ volumeMounts:
+ - name: tls-secret
+ mountPath: /etc/chp/tls
+ readOnly: true
+ {{- end }}
+ {{- with .Values.proxy.chp.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ env:
+ - name: CONFIGPROXY_AUTH_TOKEN
+ valueFrom:
+ secretKeyRef:
+ # NOTE: References the chart managed k8s Secret even if
+ # hub.existingSecret is specified to avoid using the
+ # lookup function on the user managed k8s Secret.
+ name: {{ include "jupyterhub.hub.fullname" . }}
+ key: hub.config.ConfigurableHTTPProxy.auth_token
+ {{- with .Values.proxy.chp.extraEnv }}
+ {{- include "jupyterhub.extraEnv" . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.proxy.chp.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ ports:
+ {{- if or $manualHTTPS $manualHTTPSwithsecret }}
+ - name: https
+ containerPort: 8443
+ {{- end }}
+ - name: http
+ containerPort: 8000
+ - name: api
+ containerPort: 8001
+ {{- if .Values.proxy.chp.livenessProbe.enabled }}
+ livenessProbe:
+ initialDelaySeconds: {{ .Values.proxy.chp.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.proxy.chp.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.proxy.chp.livenessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.proxy.chp.livenessProbe.failureThreshold }}
+ httpGet:
+ path: /_chp_healthz
+ {{- if or $manualHTTPS $manualHTTPSwithsecret }}
+ port: https
+ scheme: HTTPS
+ {{- else }}
+ port: http
+ scheme: HTTP
+ {{- end }}
+ {{- end }}
+ {{- if .Values.proxy.chp.readinessProbe.enabled }}
+ readinessProbe:
+ initialDelaySeconds: {{ .Values.proxy.chp.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.proxy.chp.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.proxy.chp.readinessProbe.timeoutSeconds }}
+ failureThreshold: {{ .Values.proxy.chp.readinessProbe.failureThreshold }}
+ httpGet:
+ path: /_chp_healthz
+ {{- if or $manualHTTPS $manualHTTPSwithsecret }}
+ port: https
+ scheme: HTTPS
+ {{- else }}
+ port: http
+ scheme: HTTP
+ {{- end }}
+ {{- end }}
+ {{- with .Values.proxy.chp.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with .Values.proxy.chp.extraPodSpec }}
+ {{- . | toYaml | nindent 6 }}
+ {{- end }}
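+
+{{- /*
+  Illustration only: extraCommandLineFlags entries are passed through tpl, so
+  they may reference chart values. A hypothetical values.yaml snippet (the
+  customTimeout key is user-invented, not a chart default):
+
+  proxy:
+    chp:
+      customTimeout: 30000
+      extraCommandLineFlags:
+        - --no-include-prefix
+        - --timeout={{ .Values.proxy.chp.customTimeout }}
+*/}}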
diff --git a/jupyterhub/templates/proxy/netpol.yaml b/jupyterhub/templates/proxy/netpol.yaml
new file mode 100644
index 0000000..0af853a
--- /dev/null
+++ b/jupyterhub/templates/proxy/netpol.yaml
@@ -0,0 +1,108 @@
+{{- $HTTPS := .Values.proxy.https.enabled -}}
+{{- $autoHTTPS := and $HTTPS (and (eq .Values.proxy.https.type "letsencrypt") .Values.proxy.https.hosts) -}}
+{{- $manualHTTPS := and $HTTPS (eq .Values.proxy.https.type "manual") -}}
+{{- $manualHTTPSwithsecret := and $HTTPS (eq .Values.proxy.https.type "secret") -}}
+{{- if .Values.proxy.chp.networkPolicy.enabled -}}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: {{ include "jupyterhub.proxy.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ podSelector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ policyTypes:
+ - Ingress
+ - Egress
+
+ # IMPORTANT:
+ # NetworkPolicy's ingress "from" and egress "to" rule specifications require
+ # great attention to detail. A quick summary is:
+ #
+ # 1. You can provide "from"/"to" rules that provide access either ports or a
+ # subset of ports.
+ # 2. You can for each "from"/"to" rule provide any number of
+ # "sources"/"destinations" of four different kinds.
+ # - podSelector - targets pods with a certain label in the same namespace as the NetworkPolicy
+ # - namespaceSelector - targets all pods running in namespaces with a certain label
+ # - namespaceSelector and podSelector - targets pods with a certain label running in namespaces with a certain label
+ # - ipBlock - targets network traffic from/to a set of IP address ranges
+ #
+ # Read more at: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
+ #
+ ingress:
+ {{- with .Values.proxy.chp.networkPolicy.allowedIngressPorts }}
+ # allow incoming traffic to these ports independent of source
+ - ports:
+ {{- range $port := . }}
+ - port: {{ $port }}
+ {{- end }}
+ {{- end }}
+
+ # allowed pods (hub.jupyter.org/network-access-proxy-http) --> proxy (http/https port)
+ - ports:
+ - port: http
+ {{- if or $manualHTTPS $manualHTTPSwithsecret }}
+ - port: https
+ {{- end }}
+ from:
+ # source 1 - labeled pods
+ - podSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-proxy-http: "true"
+ {{- if eq .Values.proxy.chp.networkPolicy.interNamespaceAccessLabels "accept" }}
+ namespaceSelector:
+ matchLabels: {} # without this, the podSelector would only consider pods in the local namespace
+ # source 2 - pods in labeled namespaces
+ - namespaceSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-proxy-http: "true"
+ {{- end }}
+
+ # allowed pods (hub.jupyter.org/network-access-proxy-api) --> proxy (api port)
+ - ports:
+ - port: api
+ from:
+ # source 1 - labeled pods
+ - podSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-proxy-api: "true"
+ {{- if eq .Values.proxy.chp.networkPolicy.interNamespaceAccessLabels "accept" }}
+ namespaceSelector:
+ matchLabels: {} # without this, the podSelector would only consider pods in the local namespace
+ # source 2 - pods in labeled namespaces
+ - namespaceSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-proxy-api: "true"
+ {{- end }}
+
+  {{- with .Values.proxy.chp.networkPolicy.ingress }}
+ # depends, but default is nothing --> proxy
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+
+ egress:
+ # proxy --> hub
+ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "hub") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8081
+
+ # proxy --> singleuser-server
+ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8888
+
+ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.proxy.chp.networkPolicy)) }}
+ {{- . | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/proxy/pdb.yaml b/jupyterhub/templates/proxy/pdb.yaml
new file mode 100644
index 0000000..d8651f5
--- /dev/null
+++ b/jupyterhub/templates/proxy/pdb.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.proxy.chp.pdb.enabled -}}
+{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}}
+{{- /* k8s 1.21+ required */ -}}
+apiVersion: policy/v1
+{{- else }}
+apiVersion: policy/v1beta1
+{{- end }}
+kind: PodDisruptionBudget
+metadata:
+ name: {{ include "jupyterhub.proxy.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ {{- if not (typeIs "" .Values.proxy.chp.pdb.maxUnavailable) }}
+ maxUnavailable: {{ .Values.proxy.chp.pdb.maxUnavailable }}
+ {{- end }}
+ {{- if not (typeIs "" .Values.proxy.chp.pdb.minAvailable) }}
+ minAvailable: {{ .Values.proxy.chp.pdb.minAvailable }}
+ {{- end }}
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+{{- end }}
diff --git a/jupyterhub/templates/proxy/secret.yaml b/jupyterhub/templates/proxy/secret.yaml
new file mode 100644
index 0000000..d9ff8ad
--- /dev/null
+++ b/jupyterhub/templates/proxy/secret.yaml
@@ -0,0 +1,13 @@
+{{- $manualHTTPS := and .Values.proxy.https.enabled (eq .Values.proxy.https.type "manual") -}}
+{{- if $manualHTTPS -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "jupyterhub.proxy-public-manual-tls.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+type: kubernetes.io/tls
+data:
+ tls.crt: {{ .Values.proxy.https.manual.cert | required "Required configuration missing: proxy.https.manual.cert" | b64enc }}
+ tls.key: {{ .Values.proxy.https.manual.key | required "Required configuration missing: proxy.https.manual.key" | b64enc }}
+{{- end }}
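+
+{{- /*
+  Illustration only: one way to supply the manual certificate without
+  committing it to a values file is helm's --set-file, e.g. (release and
+  file names are placeholders):
+
+  helm upgrade --install jhub ./jupyterhub \
+    --set proxy.https.enabled=true \
+    --set proxy.https.type=manual \
+    --set-file proxy.https.manual.cert=tls.crt \
+    --set-file proxy.https.manual.key=tls.key
+*/}}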
diff --git a/jupyterhub/templates/proxy/service.yaml b/jupyterhub/templates/proxy/service.yaml
new file mode 100644
index 0000000..8a96eb1
--- /dev/null
+++ b/jupyterhub/templates/proxy/service.yaml
@@ -0,0 +1,80 @@
+{{- $enabled := .Values.proxy.https.enabled -}}
+{{- $autoHTTPS := and $enabled (and (eq .Values.proxy.https.type "letsencrypt") .Values.proxy.https.hosts) -}}
+{{- $manualHTTPS := and $enabled (eq .Values.proxy.https.type "manual") -}}
+{{- $manualHTTPSwithsecret := and $enabled (eq .Values.proxy.https.type "secret") -}}
+{{- $offloadHTTPS := and $enabled (eq .Values.proxy.https.type "offload") -}}
+{{- $valid := or $autoHTTPS (or $manualHTTPS (or $manualHTTPSwithsecret $offloadHTTPS)) -}}
+{{- $HTTPS := and $enabled $valid -}}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "jupyterhub.proxy-api.fullname" . }}
+ labels:
+ {{- $_ := merge (dict "componentSuffix" "-api") . }}
+ {{- include "jupyterhub.labels" $_ | nindent 4 }}
+spec:
+ selector:
+ {{- include "jupyterhub.matchLabels" . | nindent 4 }}
+ ports:
+ - port: 8001
+ targetPort: api
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "jupyterhub.proxy-public.fullname" . }}
+ labels:
+ {{- $_ := merge (dict "componentSuffix" "-public") . }}
+ {{- include "jupyterhub.labels" $_ | nindent 4 }}
+ {{- with .Values.proxy.service.labels }}
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+ {{- with .Values.proxy.service.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+spec:
+ selector:
+ {{- if $autoHTTPS }}
+ component: autohttps
+ {{- else }}
+ component: proxy
+ {{- end }}
+ release: {{ .Release.Name }}
+ ports:
+ {{- if $HTTPS }}
+ - name: https
+ port: 443
+      # When HTTPS termination is handled outside our helm chart, pass traffic
+      # coming in via this Service's port 443 to the targeted pod's port meant
+      # for HTTP traffic.
+ {{- if $offloadHTTPS }}
+ targetPort: http
+ {{- else }}
+ targetPort: https
+ {{- end }}
+ {{- with .Values.proxy.service.nodePorts.https }}
+ nodePort: {{ . }}
+ {{- end }}
+ {{- end }}
+ {{- if ne .Values.proxy.service.disableHttpPort true }}
+ - name: http
+ port: 80
+ targetPort: http
+ {{- with .Values.proxy.service.nodePorts.http }}
+ nodePort: {{ . }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.proxy.service.extraPorts }}
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+ type: {{ .Values.proxy.service.type }}
+ {{- with .Values.proxy.service.loadBalancerIP }}
+ loadBalancerIP: {{ . }}
+ {{- end }}
+ {{- if eq .Values.proxy.service.type "LoadBalancer" }}
+ {{- with .Values.proxy.service.loadBalancerSourceRanges }}
+ loadBalancerSourceRanges:
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+ {{- end }}
diff --git a/jupyterhub/templates/scheduling/_scheduling-helpers.tpl b/jupyterhub/templates/scheduling/_scheduling-helpers.tpl
new file mode 100644
index 0000000..0a1a741
--- /dev/null
+++ b/jupyterhub/templates/scheduling/_scheduling-helpers.tpl
@@ -0,0 +1,138 @@
+{{- define "jupyterhub.userNodeAffinityRequired" -}}
+{{- if eq .Values.scheduling.userPods.nodeAffinity.matchNodePurpose "require" -}}
+- matchExpressions:
+ - key: hub.jupyter.org/node-purpose
+ operator: In
+ values: [user]
+{{- end }}
+{{- with .Values.singleuser.extraNodeAffinity.required }}
+{{- . | toYaml | nindent 0 }}
+{{- end }}
+{{- end }}
+
+{{- define "jupyterhub.userNodeAffinityPreferred" -}}
+{{- if eq .Values.scheduling.userPods.nodeAffinity.matchNodePurpose "prefer" -}}
+- weight: 100
+ preference:
+ matchExpressions:
+ - key: hub.jupyter.org/node-purpose
+ operator: In
+ values: [user]
+{{- end }}
+{{- with .Values.singleuser.extraNodeAffinity.preferred }}
+{{- . | toYaml | nindent 0 }}
+{{- end }}
+{{- end }}
+
+{{- define "jupyterhub.userPodAffinityRequired" -}}
+{{- with .Values.singleuser.extraPodAffinity.required -}}
+{{ . | toYaml }}
+{{- end }}
+{{- end }}
+
+{{- define "jupyterhub.userPodAffinityPreferred" -}}
+{{- with .Values.singleuser.extraPodAffinity.preferred -}}
+{{ . | toYaml }}
+{{- end }}
+{{- end }}
+
+{{- define "jupyterhub.userPodAntiAffinityRequired" -}}
+{{- with .Values.singleuser.extraPodAntiAffinity.required -}}
+{{ . | toYaml }}
+{{- end }}
+{{- end }}
+
+{{- define "jupyterhub.userPodAntiAffinityPreferred" -}}
+{{- with .Values.singleuser.extraPodAntiAffinity.preferred -}}
+{{ . | toYaml }}
+{{- end }}
+{{- end }}
+
+
+
+{{- /*
+  jupyterhub.userAffinity:
+  It is used by the user-placeholder pods to set the same affinity on them as
+  the user pods spawned by KubeSpawner.
+*/}}
+{{- define "jupyterhub.userAffinity" -}}
+
+{{- $dummy := set . "nodeAffinityRequired" (include "jupyterhub.userNodeAffinityRequired" .) -}}
+{{- $dummy := set . "podAffinityRequired" (include "jupyterhub.userPodAffinityRequired" .) -}}
+{{- $dummy := set . "podAntiAffinityRequired" (include "jupyterhub.userPodAntiAffinityRequired" .) -}}
+{{- $dummy := set . "nodeAffinityPreferred" (include "jupyterhub.userNodeAffinityPreferred" .) -}}
+{{- $dummy := set . "podAffinityPreferred" (include "jupyterhub.userPodAffinityPreferred" .) -}}
+{{- $dummy := set . "podAntiAffinityPreferred" (include "jupyterhub.userPodAntiAffinityPreferred" .) -}}
+{{- $dummy := set . "hasNodeAffinity" (or .nodeAffinityRequired .nodeAffinityPreferred) -}}
+{{- $dummy := set . "hasPodAffinity" (or .podAffinityRequired .podAffinityPreferred) -}}
+{{- $dummy := set . "hasPodAntiAffinity" (or .podAntiAffinityRequired .podAntiAffinityPreferred) -}}
+
+{{- if .hasNodeAffinity -}}
+nodeAffinity:
+ {{- if .nodeAffinityRequired }}
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ {{- .nodeAffinityRequired | nindent 6 }}
+ {{- end }}
+
+ {{- if .nodeAffinityPreferred }}
+ preferredDuringSchedulingIgnoredDuringExecution:
+ {{- .nodeAffinityPreferred | nindent 4 }}
+ {{- end }}
+{{- end }}
+
+{{- if .hasPodAffinity }}
+podAffinity:
+ {{- if .podAffinityRequired }}
+ requiredDuringSchedulingIgnoredDuringExecution:
+ {{- .podAffinityRequired | nindent 4 }}
+ {{- end }}
+
+ {{- if .podAffinityPreferred }}
+ preferredDuringSchedulingIgnoredDuringExecution:
+ {{- .podAffinityPreferred | nindent 4 }}
+ {{- end }}
+{{- end }}
+
+{{- if .hasPodAntiAffinity }}
+podAntiAffinity:
+ {{- if .podAntiAffinityRequired }}
+ requiredDuringSchedulingIgnoredDuringExecution:
+ {{- .podAntiAffinityRequired | nindent 4 }}
+ {{- end }}
+
+ {{- if .podAntiAffinityPreferred }}
+ preferredDuringSchedulingIgnoredDuringExecution:
+ {{- .podAntiAffinityPreferred | nindent 4 }}
+ {{- end }}
+{{- end }}
+
+{{- end }}
+
+
+
+{{- define "jupyterhub.coreAffinity" -}}
+{{- $require := eq .Values.scheduling.corePods.nodeAffinity.matchNodePurpose "require" -}}
+{{- $prefer := eq .Values.scheduling.corePods.nodeAffinity.matchNodePurpose "prefer" -}}
+{{- if or $require $prefer -}}
+affinity:
+ nodeAffinity:
+ {{- if $require }}
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: hub.jupyter.org/node-purpose
+ operator: In
+ values: [core]
+ {{- end }}
+ {{- if $prefer }}
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: hub.jupyter.org/node-purpose
+ operator: In
+ values: [core]
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/scheduling/priorityclass.yaml b/jupyterhub/templates/scheduling/priorityclass.yaml
new file mode 100644
index 0000000..77c84cb
--- /dev/null
+++ b/jupyterhub/templates/scheduling/priorityclass.yaml
@@ -0,0 +1,15 @@
+{{- if .Values.scheduling.podPriority.enabled }}
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+ name: {{ include "jupyterhub.priority.fullname" . }}
+ annotations:
+ meta.helm.sh/release-name: "{{ .Release.Name }}"
+ meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
+ labels:
+ {{- $_ := merge (dict "componentLabel" "default-priority") . }}
+ {{- include "jupyterhub.labels" $_ | nindent 4 }}
+value: {{ .Values.scheduling.podPriority.defaultPriority }}
+globalDefault: {{ .Values.scheduling.podPriority.globalDefault }}
+description: "A default priority higher than user placeholders priority."
+{{- end }}
diff --git a/jupyterhub/templates/scheduling/user-placeholder/pdb.yaml b/jupyterhub/templates/scheduling/user-placeholder/pdb.yaml
new file mode 100644
index 0000000..ec84fb5
--- /dev/null
+++ b/jupyterhub/templates/scheduling/user-placeholder/pdb.yaml
@@ -0,0 +1,22 @@
+{{- /*
+The cluster autoscaler should be allowed to evict and reschedule these pods if
+it would help it scale down a node.
+*/}}
+{{- if .Values.scheduling.userPlaceholder.enabled -}}
+{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}}
+{{- /* k8s 1.21+ required */ -}}
+apiVersion: policy/v1
+{{- else }}
+apiVersion: policy/v1beta1
+{{- end }}
+kind: PodDisruptionBudget
+metadata:
+ name: {{ include "jupyterhub.user-placeholder.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ minAvailable: 0
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+{{- end }}
diff --git a/jupyterhub/templates/scheduling/user-placeholder/priorityclass.yaml b/jupyterhub/templates/scheduling/user-placeholder/priorityclass.yaml
new file mode 100644
index 0000000..bdedbdd
--- /dev/null
+++ b/jupyterhub/templates/scheduling/user-placeholder/priorityclass.yaml
@@ -0,0 +1,16 @@
+{{- if .Values.scheduling.podPriority.enabled }}
+{{- if .Values.scheduling.userPlaceholder.enabled -}}
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+ name: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
+ annotations:
+ meta.helm.sh/release-name: "{{ .Release.Name }}"
+ meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+value: {{ .Values.scheduling.podPriority.userPlaceholderPriority }}
+globalDefault: false
+description: "With a priority higher or eqaul to a cluster autoscalers priority cutoff, a pod can trigger a cluster scale up. At the same time, placeholder pods priority should be lower than other pods to make them evictable."
+{{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/scheduling/user-placeholder/statefulset.yaml b/jupyterhub/templates/scheduling/user-placeholder/statefulset.yaml
new file mode 100644
index 0000000..e0f6f59
--- /dev/null
+++ b/jupyterhub/templates/scheduling/user-placeholder/statefulset.yaml
@@ -0,0 +1,80 @@
+
+{{- /*
+These user-placeholder pods can be used to test cluster autoscaling in a
+controlled fashion.
+
+Example:
+$ echo 'Simulating four users...'
+$ kubectl scale sts/user-placeholder --replicas 4
+*/}}
+{{- if .Values.scheduling.userPlaceholder.enabled -}}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ include "jupyterhub.user-placeholder.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ podManagementPolicy: Parallel
+ {{- if typeIs "int" .Values.scheduling.userPlaceholder.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.scheduling.userPlaceholder.revisionHistoryLimit }}
+ {{- end }}
+ replicas: {{ .Values.scheduling.userPlaceholder.replicas }}
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ serviceName: {{ include "jupyterhub.user-placeholder.fullname" . }}
+ template:
+ metadata:
+ {{- with .Values.scheduling.userPlaceholder.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ labels:
+      {{- /* Changes here will cause the StatefulSet to restart the pods. */}}
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ {{- with .Values.scheduling.userPlaceholder.labels }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- if .Values.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
+ {{- end }}
+ {{- if .Values.scheduling.userScheduler.enabled }}
+ schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
+ {{- end }}
+ {{- with .Values.singleuser.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- with concat .Values.scheduling.userPods.tolerations .Values.singleuser.extraTolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- if include "jupyterhub.userAffinity" . }}
+ affinity:
+ {{- include "jupyterhub.userAffinity" . | nindent 8 }}
+ {{- end }}
+ terminationGracePeriodSeconds: 0
+ automountServiceAccountToken: false
+ {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.scheduling.userPlaceholder.image) }}
+ imagePullSecrets: {{ . }}
+ {{- end }}
+ containers:
+ - name: pause
+ image: {{ .Values.scheduling.userPlaceholder.image.name }}:{{ .Values.scheduling.userPlaceholder.image.tag }}
+ {{- if .Values.scheduling.userPlaceholder.resources }}
+ resources:
+ {{- .Values.scheduling.userPlaceholder.resources | toYaml | nindent 12 }}
+ {{- else if (include "jupyterhub.singleuser.resources" .) }}
+ resources:
+ {{- include "jupyterhub.singleuser.resources" . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.scheduling.userPlaceholder.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ {{- with .Values.scheduling.userPlaceholder.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/scheduling/user-scheduler/configmap.yaml b/jupyterhub/templates/scheduling/user-scheduler/configmap.yaml
new file mode 100644
index 0000000..22ea9a8
--- /dev/null
+++ b/jupyterhub/templates/scheduling/user-scheduler/configmap.yaml
@@ -0,0 +1,95 @@
+{{- if .Values.scheduling.userScheduler.enabled -}}
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+data:
+ {{- /*
+ This is configuration of a k8s official kube-scheduler binary running in the
+ user-scheduler pod.
+
+ ref: https://kubernetes.io/docs/reference/scheduling/config/
+ ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1beta2/
+ ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1beta3/
+
+ v1beta1 can be used with kube-scheduler binary version <=1.21
+ v1beta2 requires kube-scheduler binary version >=1.22
+ v1beta3 requires kube-scheduler binary version >=1.23
+
+ kube-scheduler binaries versioned >=1.21 will error in k8s clusters
+ versioned <=1.20. To support a modern version of kube-scheduler and k8s
+ versions <=1.20 upwards, we provide two scenarios:
+
+ 1. For k8s >= 1.21 we use a modern version of kube-scheduler and Helm chart
+ configuration works as expected.
+ 2. For k8s <= 1.20 we use a hardcoded version of kube-scheduler (v1.20.15)
+ and configuration (v1beta1) of kube-scheduler.
+ */}}
+ config.yaml: |
+ {{- /*
+ FIXME: We have added a workaround for EKS where
+ .Capabilities.KubeVersion.Minor can return a
+ string like "22+" instead of just "22".
+
+ See https://github.com/aws/eks-distro/issues/1128.
+ */}}
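+  {{- /*
+    Illustrative walkthrough of the expression below (not extra logic): on an
+    EKS cluster reporting Minor "22+", the replace yields "22", and atoi
+    yields 22 >= 21, so the v1beta3 configuration branch is rendered.
+  */}}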
+ {{- if ge (atoi (.Capabilities.KubeVersion.Minor | replace "+" "")) 21 }}
+ apiVersion: kubescheduler.config.k8s.io/v1beta3
+ kind: KubeSchedulerConfiguration
+ leaderElection:
+ resourceLock: endpoints
+ resourceName: {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
+ resourceNamespace: "{{ .Release.Namespace }}"
+ profiles:
+ - schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
+ {{- with .Values.scheduling.userScheduler.plugins }}
+ plugins:
+ {{- . | toYaml | nindent 10 }}
+ {{- end }}
+ {{- with .Values.scheduling.userScheduler.pluginConfig }}
+ pluginConfig:
+ {{- . | toYaml | nindent 10 }}
+ {{- end }}
+ {{- else }}
+ # WARNING: The tag of this image is hardcoded, and the
+ # "scheduling.userScheduler.plugins" configuration of the Helm
+ # chart that generated this resource manifest wasn't respected. If
+ # you install the Helm chart in a k8s cluster versioned 1.21 or
+ # higher, your configuration will be respected.
+ apiVersion: kubescheduler.config.k8s.io/v1beta1
+ kind: KubeSchedulerConfiguration
+ leaderElection:
+ resourceLock: endpoints
+ resourceName: {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
+ resourceNamespace: "{{ .Release.Namespace }}"
+ profiles:
+ - schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
+ plugins:
+ score:
+ disabled:
+ - name: SelectorSpread
+ - name: TaintToleration
+ - name: PodTopologySpread
+ - name: NodeResourcesBalancedAllocation
+ - name: NodeResourcesLeastAllocated
+          # Plugins must first be disabled before being re-enabled with a
+          # different weight, to avoid an error.
+ - name: NodePreferAvoidPods
+ - name: NodeAffinity
+ - name: InterPodAffinity
+ - name: ImageLocality
+ enabled:
+ - name: NodePreferAvoidPods
+ weight: 161051
+ - name: NodeAffinity
+ weight: 14631
+ - name: InterPodAffinity
+ weight: 1331
+ - name: NodeResourcesMostAllocated
+ weight: 121
+ - name: ImageLocality
+ weight: 11
+ {{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/scheduling/user-scheduler/deployment.yaml b/jupyterhub/templates/scheduling/user-scheduler/deployment.yaml
new file mode 100644
index 0000000..58bb23a
--- /dev/null
+++ b/jupyterhub/templates/scheduling/user-scheduler/deployment.yaml
@@ -0,0 +1,109 @@
+{{- if .Values.scheduling.userScheduler.enabled -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ {{- if typeIs "int" .Values.scheduling.userScheduler.revisionHistoryLimit }}
+ revisionHistoryLimit: {{ .Values.scheduling.userScheduler.revisionHistoryLimit }}
+ {{- end }}
+ replicas: {{ .Values.scheduling.userScheduler.replicas }}
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "jupyterhub.matchLabels" . | nindent 8 }}
+ {{- with .Values.scheduling.userScheduler.labels }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ annotations:
+ checksum/config-map: {{ include (print $.Template.BasePath "/scheduling/user-scheduler/configmap.yaml") . | sha256sum }}
+ {{- with .Values.scheduling.userScheduler.annotations }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ spec:
+      {{- with include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
+ serviceAccountName: {{ . }}
+ {{- end }}
+ {{- if .Values.scheduling.podPriority.enabled }}
+ priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
+ {{- end }}
+ {{- with .Values.scheduling.userScheduler.nodeSelector }}
+ nodeSelector:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- with concat .Values.scheduling.corePods.tolerations .Values.scheduling.userScheduler.tolerations }}
+ tolerations:
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
+ {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
+ volumes:
+ - name: config
+ configMap:
+ name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
+ {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.scheduling.userScheduler.image) }}
+ imagePullSecrets: {{ . }}
+ {{- end }}
+ containers:
+ - name: kube-scheduler
+ {{- /*
+ FIXME: We have added a workaround for EKS where
+ .Capabilities.KubeVersion.Minor can return a
+ string like "22+" instead of just "22".
+
+ See https://github.com/aws/eks-distro/issues/1128.
+ */}}
+ {{- if ge (atoi (.Capabilities.KubeVersion.Minor | replace "+" "")) 21 }}
+ image: {{ .Values.scheduling.userScheduler.image.name }}:{{ .Values.scheduling.userScheduler.image.tag }}
+ {{- else }}
+ # WARNING: The tag of this image is hardcoded, and the
+ # "scheduling.userScheduler.image.tag" configuration of the
+ # Helm chart that generated this resource manifest isn't
+ # respected. If you install the Helm chart in a k8s cluster
+ # versioned 1.21 or higher, your configuration will be
+ # respected.
+ image: {{ .Values.scheduling.userScheduler.image.name }}:v1.20.15
+ {{- end }}
+ {{- with .Values.scheduling.userScheduler.image.pullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ command:
+ - /usr/local/bin/kube-scheduler
+ # NOTE: --authentication-skip-lookup=true is used to avoid a
+ # seemingly harmless error, if we need to not skip
+ # "authentication lookup" in the future, see the linked issue.
+ #
+ # ref: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1894
+ - --config=/etc/user-scheduler/config.yaml
+ - --authentication-skip-lookup=true
+ - --v={{ .Values.scheduling.userScheduler.logLevel }}
+ volumeMounts:
+ - mountPath: /etc/user-scheduler
+ name: config
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ scheme: HTTPS
+ port: 10259
+ initialDelaySeconds: 15
+ readinessProbe:
+ httpGet:
+ path: /healthz
+ scheme: HTTPS
+ port: 10259
+ {{- with .Values.scheduling.userScheduler.resources }}
+ resources:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with .Values.scheduling.userScheduler.containerSecurityContext }}
+ securityContext:
+ {{- . | toYaml | nindent 12 }}
+ {{- end }}
+ {{- with .Values.scheduling.userScheduler.extraPodSpec }}
+ {{- . | toYaml | nindent 6 }}
+ {{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/scheduling/user-scheduler/pdb.yaml b/jupyterhub/templates/scheduling/user-scheduler/pdb.yaml
new file mode 100644
index 0000000..3a9544e
--- /dev/null
+++ b/jupyterhub/templates/scheduling/user-scheduler/pdb.yaml
@@ -0,0 +1,23 @@
+{{- if and .Values.scheduling.userScheduler.enabled .Values.scheduling.userScheduler.pdb.enabled -}}
+{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}}
+{{- /* k8s 1.21+ required */ -}}
+apiVersion: policy/v1
+{{- else }}
+apiVersion: policy/v1beta1
+{{- end }}
+kind: PodDisruptionBudget
+metadata:
+ name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+  {{- if not (typeIs "<nil>" .Values.scheduling.userScheduler.pdb.maxUnavailable) }}
+  maxUnavailable: {{ .Values.scheduling.userScheduler.pdb.maxUnavailable }}
+  {{- end }}
+  {{- if not (typeIs "<nil>" .Values.scheduling.userScheduler.pdb.minAvailable) }}
+ minAvailable: {{ .Values.scheduling.userScheduler.pdb.minAvailable }}
+ {{- end }}
+ selector:
+ matchLabels:
+ {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+{{- end }}
diff --git a/jupyterhub/templates/scheduling/user-scheduler/rbac.yaml b/jupyterhub/templates/scheduling/user-scheduler/rbac.yaml
new file mode 100644
index 0000000..f77640b
--- /dev/null
+++ b/jupyterhub/templates/scheduling/user-scheduler/rbac.yaml
@@ -0,0 +1,233 @@
+{{- if .Values.scheduling.userScheduler.enabled -}}
+{{- if .Values.rbac.create -}}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "jupyterhub.user-scheduler.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+rules:
+ # Copied from the system:kube-scheduler ClusterRole of the k8s version
+ # matching the kube-scheduler binary we use. A modification has been made to
+ # resourceName fields to remain relevant for how we have named our resources
+ # in this Helm chart.
+ #
+ # NOTE: These rules have been:
+ # - unchanged between 1.12 and 1.15
+ # - changed in 1.16
+ # - changed in 1.17
+ # - unchanged between 1.18 and 1.20
+ # - changed in 1.21: get/list/watch permission for namespace,
+ # csidrivers, csistoragecapacities was added.
+ # - unchanged between 1.22 and 1.23
+ #
+ # ref: https://github.com/kubernetes/kubernetes/blob/v1.23.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L705-L861
+ - apiGroups:
+ - ""
+ - events.k8s.io
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - apiGroups:
+ - coordination.k8s.io
+ resourceNames:
+ - {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
+ resources:
+ - leases
+ verbs:
+ - get
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resourceNames:
+ - {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - delete
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - bindings
+ - pods/binding
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - pods/status
+ verbs:
+ - patch
+ - update
+ - apiGroups:
+ - ""
+ resources:
+ - replicationcontrollers
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - apps
+ - extensions
+ resources:
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - apps
+ resources:
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+ - apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csinodes
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csidrivers
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - csistoragecapacities
+ verbs:
+ - get
+ - list
+ - watch
+
+ # Copied from the system:volume-scheduler ClusterRole of the k8s version
+ # matching the kube-scheduler binary we use.
+ #
+ # NOTE: These rules have not changed between 1.12 and 1.23.
+ #
+ # ref: https://github.com/kubernetes/kubernetes/blob/v1.23.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1280-L1307
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "jupyterhub.user-scheduler.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
+ namespace: "{{ .Release.Namespace }}"
+roleRef:
+ kind: ClusterRole
+ name: {{ include "jupyterhub.user-scheduler.fullname" . }}
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/scheduling/user-scheduler/serviceaccount.yaml b/jupyterhub/templates/scheduling/user-scheduler/serviceaccount.yaml
new file mode 100644
index 0000000..f84ffc1
--- /dev/null
+++ b/jupyterhub/templates/scheduling/user-scheduler/serviceaccount.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.scheduling.userScheduler.enabled -}}
+{{- if .Values.scheduling.userScheduler.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+ {{- with .Values.scheduling.userScheduler.serviceAccount.annotations }}
+ annotations:
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/singleuser/netpol.yaml b/jupyterhub/templates/singleuser/netpol.yaml
new file mode 100644
index 0000000..f388b81
--- /dev/null
+++ b/jupyterhub/templates/singleuser/netpol.yaml
@@ -0,0 +1,99 @@
+{{- if .Values.singleuser.networkPolicy.enabled -}}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: {{ include "jupyterhub.singleuser.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+spec:
+ podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 6 }}
+ policyTypes:
+ - Ingress
+ - Egress
+
+ # IMPORTANT:
+ # NetworkPolicy's ingress "from" and egress "to" rule specifications require
+ # great attention to detail. A quick summary is:
+ #
+  # 1. You can provide "from"/"to" rules that provide access to either all
+  #    ports or a subset of ports.
+  # 2. For each "from"/"to" rule, you can provide any number of
+  #    "sources"/"destinations" of four different kinds.
+ # - podSelector - targets pods with a certain label in the same namespace as the NetworkPolicy
+ # - namespaceSelector - targets all pods running in namespaces with a certain label
+ # - namespaceSelector and podSelector - targets pods with a certain label running in namespaces with a certain label
+ # - ipBlock - targets network traffic from/to a set of IP address ranges
+ #
+ # Read more at: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
+ #
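+  # As an illustration only (not rendered by this template), a custom rule
+  # supplied via singleuser.networkPolicy.ingress could combine the selector
+  # kinds above, e.g.:
+  #
+  #   - from:
+  #       - ipBlock:
+  #           cidr: 10.0.0.0/8   # illustrative CIDR, not a chart default
+  #     ports:
+  #       - port: notebook-port
+  #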
+ ingress:
+ {{- with .Values.singleuser.networkPolicy.allowedIngressPorts }}
+ # allow incoming traffic to these ports independent of source
+ - ports:
+ {{- range $port := . }}
+ - port: {{ $port }}
+ {{- end }}
+ {{- end }}
+
+ # allowed pods (hub.jupyter.org/network-access-singleuser) --> singleuser-server
+ - ports:
+ - port: notebook-port
+ from:
+ # source 1 - labeled pods
+ - podSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-singleuser: "true"
+ {{- if eq .Values.singleuser.networkPolicy.interNamespaceAccessLabels "accept" }}
+ namespaceSelector:
+ matchLabels: {} # without this, the podSelector would only consider pods in the local namespace
+ # source 2 - pods in labeled namespaces
+ - namespaceSelector:
+ matchLabels:
+ hub.jupyter.org/network-access-singleuser: "true"
+ {{- end }}
+
+ {{- with .Values.singleuser.networkPolicy.ingress }}
+ # depends, but default is nothing --> singleuser-server
+ {{- . | toYaml | nindent 4 }}
+ {{- end }}
+
+ egress:
+ # singleuser-server --> hub
+ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "hub") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8081
+
+ # singleuser-server --> proxy
+ # singleuser-server --> autohttps
+ #
+  # While not critical for core functionality, user or library code may rely
+  # on communicating with the proxy or autohttps pods via a k8s Service that
+  # can be detected from well-known environment variables.
+ #
+ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "proxy") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8000
+ - to:
+ - podSelector:
+ matchLabels:
+ {{- $_ := merge (dict "componentLabel" "autohttps") . }}
+ {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
+ ports:
+ - port: 8080
+ - port: 8443
+
+ {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.singleuser.networkPolicy)) }}
+ {{- . | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/jupyterhub/templates/singleuser/secret.yaml b/jupyterhub/templates/singleuser/secret.yaml
new file mode 100644
index 0000000..f4a5fe2
--- /dev/null
+++ b/jupyterhub/templates/singleuser/secret.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.singleuser.extraFiles }}
+kind: Secret
+apiVersion: v1
+metadata:
+ name: {{ include "jupyterhub.singleuser.fullname" . }}
+ labels:
+ {{- include "jupyterhub.labels" . | nindent 4 }}
+type: Opaque
+{{- with include "jupyterhub.extraFiles.data" .Values.singleuser.extraFiles }}
+data:
+ {{- . | nindent 2 }}
+{{- end }}
+{{- with include "jupyterhub.extraFiles.stringData" .Values.singleuser.extraFiles }}
+stringData:
+ {{- . | nindent 2 }}
+{{- end }}
+{{- end }}
diff --git a/jupyterhub/values-minikube.yaml b/jupyterhub/values-minikube.yaml
new file mode 100644
index 0000000..e1c90aa
--- /dev/null
+++ b/jupyterhub/values-minikube.yaml
@@ -0,0 +1,663 @@
+# fullnameOverride and nameOverride distinguish between blank strings, null
+# values, and non-blank strings. For more details, see the configuration reference.
+fullnameOverride: ""
+nameOverride:
+
+# custom can contain anything you want to pass to the hub pod, as all passed
+# Helm template values will be made available there.
+custom: {}
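+# For example (illustrative keys, not used by the chart's own templates):
+#
+# custom:
+#   currentEnvironment: dev
+#   supportEmail: admin@example.org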
+
+# imagePullSecret is configuration to create a k8s Secret that Helm chart's pods
+# can get credentials from to pull their images.
+imagePullSecret:
+ create: false
+ automaticReferenceInjection: true
+ registry:
+ username:
+ password:
+ email:
+# imagePullSecrets is configuration to reference the k8s Secret resources the
+# Helm chart's pods can get credentials from to pull their images.
+imagePullSecrets: []
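+# For example, to reference a pre-existing k8s Secret (illustrative name):
+#
+# imagePullSecrets:
+#   - name: my-registry-credentials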
+
+# hub relates to the hub pod, responsible for running JupyterHub, its configured
+# Authenticator class KubeSpawner, and its configured Proxy class
+# ConfigurableHTTPProxy. KubeSpawner creates the user pods, and
+# ConfigurableHTTPProxy speaks with the actual ConfigurableHTTPProxy server in
+# the proxy pod.
+hub:
+ revisionHistoryLimit:
+ config:
+ JupyterHub:
+ admin_access: true
+ authenticator_class: dummy
+ service:
+ type: ClusterIP
+ annotations: {}
+ ports:
+ nodePort:
+ extraPorts: []
+ loadBalancerIP:
+ baseUrl: /
+ cookieSecret:
+ initContainers: []
+ nodeSelector: {}
+ tolerations: []
+ concurrentSpawnLimit: 64
+ consecutiveFailureLimit: 5
+ activeServerLimit:
+ deploymentStrategy:
+ ## type: Recreate
+    ## - sqlite-pvc backed hubs require the Recreate deployment strategy, as a
+    ##   typical PVC storage can only be bound to one pod at a time.
+    ## - JupyterHub isn't designed to be run in parallel. More work needs to
+    ##   be done in JupyterHub itself before a fully highly available (HA)
+    ##   deployment of JupyterHub on k8s is possible.
+ type: Recreate
+ db:
+ type: sqlite-pvc
+ upgrade:
+ pvc:
+ annotations: {}
+ selector: {}
+ accessModes:
+ - ReadWriteOnce
+ storage: 10Gi
+ subPath:
+ storageClassName: standard
+ url:
+ password:
+ labels: {}
+ annotations: {}
+ command: []
+ args: []
+ extraConfig: {}
+ extraFiles: {}
+ extraEnv: {
+ JUPYTERHUB_ENV: "dev",
+ JUPYTERHUB_SINGLE_USER_IMAGE_NOTEBOOKS: "",
+ }
+ extraContainers: []
+ extraVolumes: []
+ extraVolumeMounts: []
+ image:
+ name: hubimage
+ tag: "dev"
+ pullPolicy:
+ pullSecrets: []
+ resources: {}
+ podSecurityContext:
+ fsGroup: 1000
+ containerSecurityContext:
+ runAsUser: 1000
+ runAsGroup: 1000
+ allowPrivilegeEscalation: true
+ lifecycle: {}
+ loadRoles: {}
+ services: {}
+ pdb:
+ enabled: false
+ maxUnavailable:
+ minAvailable: 1
+ networkPolicy:
+ enabled: false
+ ingress: []
+ egress: []
+ egressAllowRules:
+ cloudMetadataServer: true
+ dnsPortsPrivateIPs: true
+ nonPrivateIPs: true
+ privateIPs: true
+ interNamespaceAccessLabels: ignore
+ allowedIngressPorts: []
+ allowNamedServers: true
+ namedServerLimitPerUser:
+ authenticatePrometheus:
+ redirectToServer:
+ shutdownOnLogout:
+ templatePaths: []
+ templateVars: {}
+ livenessProbe:
+    # The livenessProbe's aim is to give JupyterHub sufficient time to start
+    # up, but to restart it if it becomes unresponsive for ~5 min
+    # (periodSeconds 10 x failureThreshold 30 = 300 seconds).
+ enabled: true
+ initialDelaySeconds: 300
+ periodSeconds: 10
+ failureThreshold: 30
+ timeoutSeconds: 3
+ readinessProbe:
+    # The readinessProbe's aim is to provide a successful startup indication,
+    # but following that never to become unready before its livenessProbe
+    # fails and restarts it if needed. Becoming unready following startup
+    # serves no purpose, as there is no other pod to fall back to in our
+    # non-HA deployment.
+ enabled: true
+ initialDelaySeconds: 0
+ periodSeconds: 2
+ failureThreshold: 1000
+ timeoutSeconds: 1
+ existingSecret:
+ serviceAccount:
+ create: true
+ name:
+ annotations: {}
+ extraPodSpec: {}
+
+rbac:
+ create: true
+
+# proxy relates to the proxy pod, the proxy-public service, and the autohttps
+# pod and proxy-http service.
+proxy:
+ secretToken: '8af21006c7c3dc381c5d3b4b27e2c99e6311d5fc243fqbf9a14646020197d67c'
+ annotations: {}
+ deploymentStrategy:
+ ## type: Recreate
+  ## - JupyterHub's interaction with the CHP proxy becomes a lot more robust
+  ##   with this configuration. To understand this, consider that JupyterHub
+  ##   during startup will interact a lot with the k8s service to reach a
+  ##   ready proxy pod. If the hub pod restarts during a helm upgrade while
+  ##   the proxy pod is making a rolling upgrade, the hub pod could end up
+  ##   starting a sequence of interactions with the old proxy pod and
+  ##   finishing that sequence with the new proxy pod. As CHP proxy pods carry
+  ##   individual state, this is very error prone. One observed outcome when
+  ##   not using the Recreate strategy has been that user pods were deleted by
+  ##   the hub pod because it considered them unreachable, having configured
+  ##   only the old proxy pod but not the new one before trying to reach them.
+ type: Recreate
+ ## rollingUpdate:
+ ## - WARNING:
+  ##   This is required to be set explicitly blank! Without it being
+  ##   explicitly blank, k8s will let any old values under rollingUpdate
+  ##   remain, the Deployment becomes invalid, and a helm upgrade would
+  ##   fail with an error like this:
+ ##
+ ## UPGRADE FAILED
+ ## Error: Deployment.apps "proxy" is invalid: spec.strategy.rollingUpdate: Forbidden: may not be specified when strategy `type` is 'Recreate'
+ ## Error: UPGRADE FAILED: Deployment.apps "proxy" is invalid: spec.strategy.rollingUpdate: Forbidden: may not be specified when strategy `type` is 'Recreate'
+ rollingUpdate:
+ # service relates to the proxy-public service
+ service:
+ type: LoadBalancer
+ labels: {}
+ annotations: {}
+ nodePorts:
+ http:
+ https:
+ disableHttpPort: false
+ extraPorts: []
+ loadBalancerIP:
+ loadBalancerSourceRanges: []
+ # chp relates to the proxy pod, which is responsible for routing traffic based
+ # on dynamic configuration sent from JupyterHub to CHP's REST API.
+ chp:
+ revisionHistoryLimit:
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+ name: jupyterhub/configurable-http-proxy
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow.
+ #
+ tag: "4.5.3" # https://github.com/jupyterhub/configurable-http-proxy/releases
+ pullPolicy:
+ pullSecrets: []
+ extraCommandLineFlags: []
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: 60
+ periodSeconds: 10
+ failureThreshold: 30
+ timeoutSeconds: 3
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: 0
+ periodSeconds: 2
+ failureThreshold: 1000
+ timeoutSeconds: 1
+ resources: {}
+ defaultTarget:
+ errorTarget:
+ extraEnv: {}
+ nodeSelector: {}
+ tolerations: []
+ networkPolicy:
+ enabled: false
+ ingress: []
+ egress: []
+ egressAllowRules:
+ cloudMetadataServer: true
+ dnsPortsPrivateIPs: true
+ nonPrivateIPs: true
+ privateIPs: true
+ interNamespaceAccessLabels: ignore
+ allowedIngressPorts: [http, https]
+ pdb:
+ enabled: false
+ maxUnavailable:
+ minAvailable: 1
+ extraPodSpec: {}
+ # traefik relates to the autohttps pod, which is responsible for TLS
+ # termination when proxy.https.type=letsencrypt.
+ traefik:
+ revisionHistoryLimit:
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+ name: traefik
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow.
+ #
+ tag: "v2.8.4" # ref: https://hub.docker.com/_/traefik?tab=tags
+ pullPolicy:
+ pullSecrets: []
+ hsts:
+ includeSubdomains: false
+ preload: false
+ maxAge: 15724800 # About 6 months
+ resources: {}
+ labels: {}
+ extraInitContainers: []
+ extraEnv: {}
+ extraVolumes: []
+ extraVolumeMounts: []
+ extraStaticConfig: {}
+ extraDynamicConfig: {}
+ nodeSelector: {}
+ tolerations: []
+ extraPorts: []
+ networkPolicy:
+ enabled: false
+ ingress: []
+ egress: []
+ egressAllowRules:
+ cloudMetadataServer: true
+ dnsPortsPrivateIPs: true
+ nonPrivateIPs: true
+ privateIPs: true
+ interNamespaceAccessLabels: ignore
+ allowedIngressPorts: [http, https]
+ pdb:
+ enabled: false
+ maxUnavailable:
+ minAvailable: 1
+ serviceAccount:
+ create: true
+ name:
+ annotations: {}
+ extraPodSpec: {}
+ secretSync:
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+ name: jupyterhub/k8s-secret-sync
+ tag: "2.0.0"
+ pullPolicy:
+ pullSecrets: []
+ resources: {}
+ labels: {}
+ https:
+ enabled: false
+ type: letsencrypt
+    # type: one of letsencrypt, manual, offload, secret
+ letsencrypt:
+ contactEmail:
+ # Specify custom server here (https://acme-staging-v02.api.letsencrypt.org/directory) to hit staging LE
+ acmeServer: https://acme-v02.api.letsencrypt.org/directory
+ manual:
+ key:
+ cert:
+ secret:
+ name:
+ key: tls.key
+ crt: tls.crt
+ hosts: []
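+  # For example, to enable automatic HTTPS via Let's Encrypt (illustrative
+  # host and email; requires a public DNS record pointing at the proxy-public
+  # service):
+  #
+  #   https:
+  #     enabled: true
+  #     hosts: [hub.example.org]
+  #     letsencrypt:
+  #       contactEmail: admin@example.org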
+
+# singleuser relates to the configuration of KubeSpawner which runs in the hub
+# pod, and its spawning of user pods such as jupyter-myusername.
+singleuser:
+ podNameTemplate:
+ extraTolerations: []
+ nodeSelector: {"k8s.scaleway.com/pool-name": "processing-node-pool-dev"}
+ extraNodeAffinity:
+ required: []
+ preferred: []
+ extraPodAffinity:
+ required: []
+ preferred: []
+ extraPodAntiAffinity:
+ required: []
+ preferred: []
+ networkTools:
+ image:
+ name: jupyterhub/k8s-network-tools
+ tag: "2.0.0"
+ pullPolicy:
+ pullSecrets: []
+ resources: {}
+ cloudMetadata:
+    # blockWithIptables set to true will append a privileged initContainer
+    # that uses iptables to block the sensitive metadata server at the
+    # provided IP.
+ blockWithIptables: true
+ ip: 169.254.169.254
+ networkPolicy:
+ enabled: false
+ ingress: []
+ egress: []
+ egressAllowRules:
+ cloudMetadataServer: false
+ dnsPortsPrivateIPs: true
+ nonPrivateIPs: true
+ privateIPs: false
+ interNamespaceAccessLabels: ignore
+ allowedIngressPorts: []
+ events: true
+ extraAnnotations: {}
+ extraLabels:
+ hub.jupyter.org/network-access-hub: "true"
+ extraFiles: {}
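+  # For example (illustrative file; per the extraFiles schema in
+  # values.schema.json, each entry needs a mountPath and one of data,
+  # stringData, or binaryData):
+  #
+  # extraFiles:
+  #   jupyter-config:
+  #     mountPath: /etc/jupyter/jupyter_notebook_config.json
+  #     data:
+  #       MappingKernelManager:
+  #         cull_idle_timeout: 1200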
+ extraEnv: {}
+ lifecycleHooks: {}
+ initContainers: []
+ extraContainers: []
+ allowPrivilegeEscalation: false
+ uid: 1000
+ fsGid: 100
+ serviceAccountName:
+ storage:
+ type: dynamic
+ extraLabels: {}
+ extraVolumes: []
+ extraVolumeMounts: []
+ static:
+ pvcName:
+ subPath: "{username}"
+ capacity: 10Gi
+ homeMountPath: /home/jovyan
+ dynamic:
+ storageClass: standard
+ pvcNameTemplate: claim-{username}{servername}
+ volumeNameTemplate: volume-{username}{servername}
+ storageAccessModes: [ReadWriteOnce]
+ image:
+ name: jupyterhub/k8s-singleuser-sample
+ tag: "2.0.0"
+ pullPolicy:
+ pullSecrets: []
+ startTimeout: 6000
+ cpu:
+ limit:
+ guarantee:
+ memory:
+ limit:
+ guarantee: 1G
+ extraResource:
+ limits: {}
+ guarantees: {}
+ cmd: jupyterhub-singleuser
+ defaultUrl:
+ extraPodConfig: {}
+ profileList: []
+
+# scheduling relates to the user-scheduler pods and user-placeholder pods.
+scheduling:
+ userScheduler:
+ enabled: true
+ revisionHistoryLimit:
+ replicas: 2
+ logLevel: 4
+  # plugins are configured on the user-scheduler to score nodes so that user
+  # pods are scheduled on the most busy node. By doing this, we help scale
+  # down more effectively. It isn't obvious how to enable/disable scoring
+  # plugins, and how to configure them, to accomplish this.
+ #
+ # plugins ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins-1
+ # migration ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduler-configuration-migrations
+ #
+ plugins:
+ score:
+ # These scoring plugins are enabled by default according to
+ # https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins
+ # 2022-02-22.
+ #
+ # Enabled with high priority:
+ # - NodeAffinity
+ # - InterPodAffinity
+ # - NodeResourcesFit
+ # - ImageLocality
+ # Remains enabled with low default priority:
+ # - TaintToleration
+ # - PodTopologySpread
+ # - VolumeBinding
+ # Disabled for scoring:
+ # - NodeResourcesBalancedAllocation
+ #
+ disabled:
+ # We disable these plugins (with regards to scoring) to not interfere
+ # or complicate our use of NodeResourcesFit.
+ - name: NodeResourcesBalancedAllocation
+          # Plugins must first be disabled before being re-enabled with a
+          # different weight, to avoid an error.
+ - name: NodeAffinity
+ - name: InterPodAffinity
+ - name: NodeResourcesFit
+ - name: ImageLocality
+ enabled:
+ - name: NodeAffinity
+ weight: 14631
+ - name: InterPodAffinity
+ weight: 1331
+ - name: NodeResourcesFit
+ weight: 121
+ - name: ImageLocality
+ weight: 11
+ pluginConfig:
+ # Here we declare that we should optimize pods to fit based on a
+ # MostAllocated strategy instead of the default LeastAllocated.
+ - name: NodeResourcesFit
+ args:
+ scoringStrategy:
+ resources:
+ - name: cpu
+ weight: 1
+ - name: memory
+ weight: 1
+ type: MostAllocated
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+ # IMPORTANT: Bumping the minor version of this binary should go hand in
+    # hand with an inspection of the user-scheduler's RBAC resources
+ # that we have forked in
+ # templates/scheduling/user-scheduler/rbac.yaml.
+ #
+ # Debugging advice:
+ #
+ # - Is configuration of kube-scheduler broken in
+ # templates/scheduling/user-scheduler/configmap.yaml?
+ #
+    #   - Is the kube-scheduler binary compatible with the k8s
+    #     api-server it works against, i.e. is it too new or too old?
+ #
+ # - You can update the GitHub workflow that runs tests to
+ # include "deploy/user-scheduler" in the k8s namespace report
+ # and reduce the user-scheduler deployments replicas to 1 in
+ # dev-config.yaml to get relevant logs from the user-scheduler
+ # pods. Inspect the "Kubernetes namespace report" action!
+ #
+    #   - Typical failures are that kube-scheduler fails to look up
+    #     resources via its "informers", and won't start trying to
+    #     schedule pods before those lookups succeed; this may require
+    #     additional RBAC permissions, or require that the k8s
+    #     api-server is aware of the resources.
+ #
+ # - If "successfully acquired lease" can be seen in the logs, it
+ # is a good sign kube-scheduler is ready to schedule pods.
+ #
+ name: k8s.gcr.io/kube-scheduler
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow. The minor version is pinned in the
+ # workflow, and should be updated there if a minor version bump is done
+ # here.
+ #
+ tag: "v1.23.10" # ref: https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md
+ pullPolicy:
+ pullSecrets: []
+ nodeSelector: {}
+ tolerations: []
+ labels: {}
+ annotations: {}
+ pdb:
+ enabled: true
+ maxUnavailable: 1
+ minAvailable:
+ resources: {}
+ serviceAccount:
+ create: true
+ name:
+ annotations: {}
+ extraPodSpec: {}
+ podPriority:
+ enabled: false
+ globalDefault: false
+ defaultPriority: 0
+ imagePullerPriority: -5
+ userPlaceholderPriority: -10
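+    # With these defaults, real pods (priority 0) can evict user-placeholder
+    # pods (priority -10) when the cluster is full, while the evicted
+    # placeholders can still trigger a cluster scale up.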
+ userPlaceholder:
+ enabled: true
+ image:
+ name: k8s.gcr.io/pause
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow.
+ #
+ # If you update this, also update prePuller.pause.image.tag
+ #
+ tag: "3.8"
+ pullPolicy:
+ pullSecrets: []
+ revisionHistoryLimit:
+ replicas: 0
+ labels: {}
+ annotations: {}
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ resources: {}
+ corePods:
+ tolerations:
+ - key: hub.jupyter.org/dedicated
+ operator: Equal
+ value: core
+ effect: NoSchedule
+ - key: hub.jupyter.org_dedicated
+ operator: Equal
+ value: core
+ effect: NoSchedule
+ nodeAffinity:
+ matchNodePurpose: prefer
+ userPods:
+ tolerations:
+ - key: hub.jupyter.org/dedicated
+ operator: Equal
+ value: user
+ effect: NoSchedule
+ - key: hub.jupyter.org_dedicated
+ operator: Equal
+ value: user
+ effect: NoSchedule
+ nodeAffinity:
+ matchNodePurpose: prefer
+
+# prePuller relates to the hook|continuous-image-puller DaemonSets
+prePuller:
+ revisionHistoryLimit:
+ labels: {}
+ annotations: {}
+ resources: {}
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ extraTolerations: []
+ # hook relates to the hook-image-awaiter Job and hook-image-puller DaemonSet
+ hook:
+ enabled: true
+ pullOnlyOnChanges: true
+ # image and the configuration below relates to the hook-image-awaiter Job
+ image:
+ name: jupyterhub/k8s-image-awaiter
+ tag: "2.0.0"
+ pullPolicy:
+ pullSecrets: []
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ podSchedulingWaitDuration: 10
+ nodeSelector: {}
+ tolerations: []
+ resources: {}
+ serviceAccount:
+ create: true
+ name:
+ annotations: {}
+ continuous:
+ enabled: true
+ pullProfileListImages: true
+ extraImages: {}
+ pause:
+ containerSecurityContext:
+ runAsUser: 65534 # nobody user
+ runAsGroup: 65534 # nobody group
+ allowPrivilegeEscalation: false
+ image:
+ name: k8s.gcr.io/pause
+ # tag is automatically bumped to new patch versions by the
+ # watch-dependencies.yaml workflow.
+ #
+ # If you update this, also update scheduling.userPlaceholder.image.tag
+ #
+ tag: "3.8"
+ pullPolicy:
+ pullSecrets: []
+
+ingress:
+ enabled: false
+ annotations: {}
+ ingressClassName:
+ hosts: []
+ pathSuffix:
+ pathType: Prefix
+ tls: []
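+# For example (illustrative host; assumes an ingress controller such as
+# ingress-nginx is already deployed in the cluster):
+#
+# ingress:
+#   enabled: true
+#   ingressClassName: nginx
+#   hosts: [hub.example.org]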
+
+# cull relates to the jupyterhub-idle-culler service, responsible for evicting
+# inactive singleuser pods.
+#
+# The configuration below, except for enabled, corresponds to command-line flags
+# for jupyterhub-idle-culler as documented here:
+# https://github.com/jupyterhub/jupyterhub-idle-culler#as-a-standalone-script
+#
+cull:
+ enabled: true
+ users: false # --cull-users
+ adminUsers: true # --cull-admin-users
+ removeNamedServers: false # --remove-named-servers
+ timeout: 3600 # --timeout
+ every: 600 # --cull-every
+ concurrency: 10 # --concurrency
+ maxAge: 0 # --max-age
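+  # For example, with the settings above a server idle for more than an hour
+  # (timeout 3600) is culled by one of the checks that run every ten minutes
+  # (every 600); maxAge 0 disables culling based on age alone.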
+
+debug:
+ enabled: false
+
+global:
+ safeToShowValues: false
diff --git a/jupyterhub/values.schema.json b/jupyterhub/values.schema.json
new file mode 100644
index 0000000..cc589df
--- /dev/null
+++ b/jupyterhub/values.schema.json
@@ -0,0 +1 @@
+{"$schema": "http://json-schema.org/draft-07/schema#", "type": "object", "additionalProperties": false, "required": ["imagePullSecrets", "hub", "proxy", "singleuser", "ingress", "prePuller", "custom", "cull", "debug", "rbac", "global"], "properties": {"fullnameOverride": {"type": ["string", "null"]}, "nameOverride": {"type": ["string", "null"]}, "imagePullSecret": {"type": "object", "required": ["create"], "if": {"properties": {"create": {"const": true}}}, "then": {"additionalProperties": false, "required": ["registry", "username", "password"], "description": "This is configuration to create a k8s Secret resource of `type:\nkubernetes.io/dockerconfigjson`, with credentials to pull images from a\nprivate image registry. If you opt to do so, it will be available for use\nby all pods in their respective `spec.imagePullSecrets` alongside other\nk8s Secrets defined in `imagePullSecrets` or the pod respective\n`...image.pullSecrets` configuration.\n\nIn other words, using this configuration option can automate both the\notherwise manual creation of a k8s Secret and the otherwise manual\nconfiguration to reference this k8s Secret in all the pods of the Helm\nchart.\n\n```sh\n# you won't need to create a k8s Secret manually...\nkubectl create secret docker-registry image-pull-secret \\\n --docker-server= \\\n --docker-username= \\\n --docker-email= \\\n --docker-password=\n```\n\nIf you just want to let all Pods reference an existing secret, use the\n[`imagePullSecrets`](schema_imagePullSecrets) configuration instead.\n", "properties": {"create": {"type": "boolean", "description": "Toggle the creation of the k8s Secret with provided credentials to\naccess a private image registry.\n"}, "automaticReferenceInjection": {"type": "boolean", "description": "Toggle the automatic reference injection of the created Secret to all\npods' `spec.imagePullSecrets` configuration.\n"}, "registry": {"type": "string", "description": "Name of the private registry you want to create a credential set for.\nIt will default to Docker Hub's image registry.\n\nExamples:\n - https://index.docker.io/v1/\n - quay.io\n - eu.gcr.io\n - alexmorreale.privatereg.net\n"}, "username": {"type": "string", "description": "Name of the user you want to use to connect to your private registry.\n\nFor external gcr.io, you will use the `_json_key`.\n\nExamples:\n - alexmorreale\n - alex@pfc.com\n - _json_key\n"}, "password": {"type": "string", "description": "Password for the private image registry's user.\n\nExamples:\n - plaintextpassword\n - abc123SECRETzyx098\n\nFor gcr.io registries the password will be a big JSON blob for a\nGoogle cloud service account, it should look something like below.\n\n```yaml\npassword: |-\n {\n \"type\": \"service_account\",\n \"project_id\": \"jupyter-se\",\n \"private_key_id\": \"f2ba09118a8d3123b3321bd9a7d6d0d9dc6fdb85\",\n ...\n }\n```\n"}, "email": {"type": ["string", "null"], "description": "Specification of an email is most often not required, but it is\nsupported.\n"}}}}, "imagePullSecrets": {"type": "array"}, "hub": {"type": "object", "additionalProperties": false, "required": ["baseUrl"], "properties": {"revisionHistoryLimit": {"type": ["integer", "null"], "minimum": 0}, "config": {"type": "object", "additionalProperties": true}, "extraFiles": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "object", "additionalProperties": false, "required": ["mountPath"], "oneOf": [{"required": ["data"]}, {"required": ["stringData"]}, {"required": ["binaryData"]}], 
"properties": {"mountPath": {"type": "string"}, "data": {"type": "object", "additionalProperties": true}, "stringData": {"type": "string"}, "binaryData": {"type": "string"}, "mode": {"type": "number"}}}}}, "baseUrl": {"type": "string"}, "command": {"type": "array"}, "args": {"type": "array"}, "cookieSecret": {"type": ["string", "null"]}, "image": {"type": "object", "additionalProperties": false, "required": ["name", "tag"], "properties": {"name": {"type": "string"}, "tag": {"type": "string"}, "pullPolicy": {"enum": [null, "", "IfNotPresent", "Always", "Never"]}, "pullSecrets": {"type": "array"}}}, "networkPolicy": {"type": "object", "additionalProperties": false, "properties": {"enabled": {"type": "boolean"}, "ingress": {"type": "array"}, "egress": {"type": "array"}, "egressAllowRules": {"type": "object", "additionalProperties": false, "properties": {"cloudMetadataServer": {"type": "boolean"}, "dnsPortsPrivateIPs": {"type": "boolean"}, "nonPrivateIPs": {"type": "boolean"}, "privateIPs": {"type": "boolean"}}}, "interNamespaceAccessLabels": {"enum": ["accept", "ignore"]}, "allowedIngressPorts": {"type": "array"}}}, "db": {"type": "object", "additionalProperties": false, "properties": {"type": {"enum": ["sqlite-pvc", "sqlite-memory", "mysql", "postgres", "other"]}, "pvc": {"type": "object", "additionalProperties": false, "required": ["storage"], "properties": {"annotations": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "selector": {"type": "object", "additionalProperties": true}, "storage": {"type": "string"}, "accessModes": {"type": "array", "items": {"type": ["string", "null"]}}, "storageClassName": {"type": ["string", "null"]}, "subPath": {"type": ["string", "null"]}}}, "upgrade": {"type": ["boolean", "null"]}, "url": {"type": ["string", "null"]}, "password": {"type": ["string", "null"]}}}, "labels": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "initContainers": {"type": "array"}, "extraEnv": {"type": ["object", "array"], "additionalProperties": true}, "extraConfig": {"type": "object", "additionalProperties": true}, "fsGid": {"type": ["integer", "null"], "minimum": 0}, "service": {"type": "object", "additionalProperties": false, "properties": {"type": {"enum": ["ClusterIP", "NodePort", "LoadBalancer", "ExternalName"]}, "ports": {"type": "object", "additionalProperties": false, "properties": {"nodePort": {"type": ["integer", "null"], "minimum": 0}}}, "annotations": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "extraPorts": {"type": "array"}, "loadBalancerIP": {"type": ["string", "null"]}}}, "pdb": {"type": "object", "additionalProperties": false, "properties": {"enabled": {"type": "boolean"}, "maxUnavailable": {"type": ["integer", "null"]}, "minAvailable": {"type": ["integer", "null"]}}}, "existingSecret": {"type": ["string", "null"]}, "nodeSelector": {"type": "object", "additionalProperties": true}, "tolerations": {"type": "array"}, "activeServerLimit": {"type": ["integer", "null"]}, "allowNamedServers": {"type": ["boolean", "null"]}, "annotations": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "authenticatePrometheus": {"type": ["boolean", "null"]}, "concurrentSpawnLimit": {"type": ["integer", "null"]}, "consecutiveFailureLimit": {"type": ["integer", "null"]}, "podSecurityContext": {"additionalProperties": true}, "containerSecurityContext": {"type": "object", 
"additionalProperties": true}, "deploymentStrategy": {"type": "object", "additionalProperties": false, "properties": {"rollingUpdate": {"type": ["string", "null"]}, "type": {"type": ["string", "null"]}}}, "extraContainers": {"type": "array"}, "extraVolumeMounts": {"type": "array"}, "extraVolumes": {"type": "array"}, "livenessProbe": {"type": "object", "additionalProperties": true, "required": ["enabled"], "if": {"properties": {"enabled": {"const": true}}}, "then": {"description": "This config option is like the k8s native specification of a\ncontainer probe, except that it also supports an `enabled` boolean\nflag.\n\nSee [the k8s\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#probe-v1-core)\nfor more details.\n"}}, "readinessProbe": {"type": "object", "additionalProperties": true, "required": ["enabled"], "if": {"properties": {"enabled": {"const": true}}}, "then": {"description": "This config option is like the k8s native specification of a\ncontainer probe, except that it also supports an `enabled` boolean\nflag.\n\nSee [the k8s\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#probe-v1-core)\nfor more details.\n"}}, "namedServerLimitPerUser": {"type": ["integer", "null"]}, "redirectToServer": {"type": ["boolean", "null"]}, "resources": {"type": "object", "additionalProperties": true}, "lifecycle": {"type": "object", "additionalProperties": false, "properties": {"postStart": {"type": "object", "additionalProperties": true}, "preStop": {"type": "object", "additionalProperties": true}}}, "services": {"type": "object", "additionalProperties": true, "properties": {"name": {"type": "string"}, "admin": {"type": "boolean"}, "command": {"type": ["string", "array"]}, "url": {"type": "string"}, "api_token": {"type": ["string", "null"]}, "apiToken": {"type": ["string", "null"]}}}, "loadRoles": {"type": "object", "additionalProperties": true}, "shutdownOnLogout": {"type": ["boolean", "null"]}, "templatePaths": {"type": "array"}, "templateVars": {"type": "object", "additionalProperties": true}, "serviceAccount": {"type": "object", "required": ["create"], "additionalProperties": false, "properties": {"create": {"type": "boolean"}, "name": {"type": ["string", "null"]}, "annotations": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}}}, "extraPodSpec": {"type": "object", "additionalProperties": true}}}, "proxy": {"type": "object", "additionalProperties": false, "properties": {"chp": {"type": "object", "additionalProperties": false, "properties": {"revisionHistoryLimit": {"type": ["integer", "null"], "minimum": 0}, "networkPolicy": {"type": "object", "additionalProperties": false, "properties": {"enabled": {"type": "boolean"}, "ingress": {"type": "array"}, "egress": {"type": "array"}, "egressAllowRules": {"type": "object", "additionalProperties": false, "properties": {"cloudMetadataServer": {"type": "boolean"}, "dnsPortsPrivateIPs": {"type": "boolean"}, "nonPrivateIPs": {"type": "boolean"}, "privateIPs": {"type": "boolean"}}}, "interNamespaceAccessLabels": {"enum": ["accept", "ignore"]}, "allowedIngressPorts": {"type": "array"}}}, "extraCommandLineFlags": {"type": "array"}, "extraEnv": {"type": ["object", "array"], "additionalProperties": true}, "pdb": {"type": "object", "additionalProperties": false, "properties": {"enabled": {"type": "boolean"}, "maxUnavailable": {"type": ["integer", "null"]}, "minAvailable": {"type": ["integer", "null"]}}}, "nodeSelector": {"type": "object", 
"additionalProperties": true}, "tolerations": {"type": "array"}, "containerSecurityContext": {"type": "object", "additionalProperties": true}, "image": {"type": "object", "additionalProperties": false, "required": ["name", "tag"], "properties": {"name": {"type": "string"}, "tag": {"type": "string"}, "pullPolicy": {"enum": [null, "", "IfNotPresent", "Always", "Never"]}, "pullSecrets": {"type": "array"}}}, "livenessProbe": {"type": "object", "additionalProperties": true, "required": ["enabled"], "if": {"properties": {"enabled": {"const": true}}}, "then": {"description": "This config option is like the k8s native specification of a\ncontainer probe, except that it also supports an `enabled` boolean\nflag.\n\nSee [the k8s\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#probe-v1-core)\nfor more details.\n"}}, "readinessProbe": {"type": "object", "additionalProperties": true, "required": ["enabled"], "if": {"properties": {"enabled": {"const": true}}}, "then": {"description": "This config option is like the k8s native specification of a\ncontainer probe, except that it also supports an `enabled` boolean\nflag.\n\nSee [the k8s\ndocumentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#probe-v1-core)\nfor more details.\n"}}, "resources": {"type": "object", "additionalProperties": true}, "defaultTarget": {"type": ["string", "null"]}, "errorTarget": {"type": ["string", "null"]}, "extraPodSpec": {"type": "object", "additionalProperties": true}}}, "secretToken": {"type": ["string", "null"]}, "service": {"type": "object", "additionalProperties": false, "properties": {"type": {"enum": ["ClusterIP", "NodePort", "LoadBalancer", "ExternalName"]}, "labels": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "annotations": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "nodePorts": {"type": "object", "additionalProperties": false, "properties": {"http": {"type": ["integer", "null"]}, "https": {"type": ["integer", "null"]}}}, "disableHttpPort": {"type": "boolean"}, "extraPorts": {"type": "array"}, "loadBalancerIP": {"type": ["string", "null"]}, "loadBalancerSourceRanges": {"type": "array"}}}, "https": {"type": "object", "additionalProperties": false, "properties": {"enabled": {"type": ["boolean", "null"]}, "type": {"enum": [null, "", "letsencrypt", "manual", "offload", "secret"]}, "letsencrypt": {"type": "object", "additionalProperties": false, "properties": {"contactEmail": {"type": ["string", "null"]}, "acmeServer": {"type": ["string", "null"]}}}, "manual": {"type": "object", "additionalProperties": false, "properties": {"key": {"type": ["string", "null"]}, "cert": {"type": ["string", "null"]}}}, "secret": {"type": "object", "additionalProperties": false, "properties": {"name": {"type": ["string", "null"]}, "key": {"type": ["string", "null"]}, "crt": {"type": ["string", "null"]}}}, "hosts": {"type": "array"}}}, "traefik": {"type": "object", "additionalProperties": false, "properties": {"revisionHistoryLimit": {"type": ["integer", "null"], "minimum": 0}, "labels": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "networkPolicy": {"type": "object", "additionalProperties": false, "properties": {"enabled": {"type": "boolean"}, "ingress": {"type": "array"}, "egress": {"type": "array"}, "egressAllowRules": {"type": "object", "additionalProperties": false, "properties": {"cloudMetadataServer": {"type": 
"boolean"}, "dnsPortsPrivateIPs": {"type": "boolean"}, "nonPrivateIPs": {"type": "boolean"}, "privateIPs": {"type": "boolean"}}}, "interNamespaceAccessLabels": {"enum": ["accept", "ignore"]}, "allowedIngressPorts": {"type": "array"}}}, "extraInitContainers": {"type": "array"}, "extraEnv": {"type": ["object", "array"], "additionalProperties": true}, "pdb": {"type": "object", "additionalProperties": false, "properties": {"enabled": {"type": "boolean"}, "maxUnavailable": {"type": ["integer", "null"]}, "minAvailable": {"type": ["integer", "null"]}}}, "nodeSelector": {"type": "object", "additionalProperties": true}, "tolerations": {"type": "array"}, "containerSecurityContext": {"type": "object", "additionalProperties": true}, "extraDynamicConfig": {"type": "object", "additionalProperties": true}, "extraPorts": {"type": "array"}, "extraStaticConfig": {"type": "object", "additionalProperties": true}, "extraVolumes": {"type": "array"}, "extraVolumeMounts": {"type": "array"}, "hsts": {"type": "object", "additionalProperties": false, "required": ["includeSubdomains", "maxAge", "preload"], "properties": {"includeSubdomains": {"type": "boolean"}, "maxAge": {"type": "integer"}, "preload": {"type": "boolean"}}}, "image": {"type": "object", "additionalProperties": false, "required": ["name", "tag"], "properties": {"name": {"type": "string"}, "tag": {"type": "string"}, "pullPolicy": {"enum": [null, "", "IfNotPresent", "Always", "Never"]}, "pullSecrets": {"type": "array"}}}, "resources": {"type": "object", "additionalProperties": true}, "serviceAccount": {"type": "object", "required": ["create"], "additionalProperties": false, "properties": {"create": {"type": "boolean"}, "name": {"type": ["string", "null"]}, "annotations": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}}}, "extraPodSpec": {"type": "object", "additionalProperties": true}}}, "labels": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "annotations": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "deploymentStrategy": {"type": "object", "additionalProperties": false, "properties": {"rollingUpdate": {"type": ["string", "null"]}, "type": {"type": ["string", "null"]}}}, "secretSync": {"type": "object", "additionalProperties": false, "properties": {"containerSecurityContext": {"type": "object", "additionalProperties": true}, "image": {"type": "object", "additionalProperties": false, "required": ["name", "tag"], "properties": {"name": {"type": "string"}, "tag": {"type": "string"}, "pullPolicy": {"enum": [null, "", "IfNotPresent", "Always", "Never"]}, "pullSecrets": {"type": "array"}}}, "resources": {"type": "object", "additionalProperties": true}}}}}, "singleuser": {"type": "object", "additionalProperties": false, "properties": {"networkPolicy": {"type": "object", "additionalProperties": false, "properties": {"enabled": {"type": "boolean"}, "ingress": {"type": "array"}, "egress": {"type": "array"}, "egressAllowRules": {"type": "object", "additionalProperties": false, "properties": {"cloudMetadataServer": {"type": "boolean"}, "dnsPortsPrivateIPs": {"type": "boolean"}, "nonPrivateIPs": {"type": "boolean"}, "privateIPs": {"type": "boolean"}}}, "interNamespaceAccessLabels": {"enum": ["accept", "ignore"]}, "allowedIngressPorts": {"type": "array"}}}, "podNameTemplate": {"type": ["string", "null"]}, "cpu": {"type": "object", "additionalProperties": false, "properties": {"limit": {"type": ["number", 
"null"]}, "guarantee": {"type": ["number", "null"]}}}, "memory": {"type": "object", "additionalProperties": false, "properties": {"limit": {"type": ["number", "string", "null"]}, "guarantee": {"type": ["number", "string", "null"]}}}, "image": {"type": "object", "additionalProperties": false, "required": ["name", "tag"], "properties": {"name": {"type": "string"}, "tag": {"type": "string"}, "pullPolicy": {"enum": [null, "", "IfNotPresent", "Always", "Never"]}, "pullSecrets": {"type": "array"}}}, "initContainers": {"type": "array"}, "profileList": {"type": "array"}, "extraFiles": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "object", "additionalProperties": false, "required": ["mountPath"], "oneOf": [{"required": ["data"]}, {"required": ["stringData"]}, {"required": ["binaryData"]}], "properties": {"mountPath": {"type": "string"}, "data": {"type": "object", "additionalProperties": true}, "stringData": {"type": "string"}, "binaryData": {"type": "string"}, "mode": {"type": "number"}}}}}, "extraEnv": {"type": ["object", "array"], "additionalProperties": true}, "nodeSelector": {"type": "object", "additionalProperties": true}, "extraTolerations": {"type": "array"}, "extraNodeAffinity": {"type": "object", "additionalProperties": false, "properties": {"required": {"type": "array"}, "preferred": {"type": "array"}}}, "extraPodAffinity": {"type": "object", "additionalProperties": false, "properties": {"required": {"type": "array"}, "preferred": {"type": "array"}}}, "extraPodAntiAffinity": {"type": "object", "additionalProperties": false, "properties": {"required": {"type": "array"}, "preferred": {"type": "array"}}}, "cloudMetadata": {"type": "object", "additionalProperties": false, "properties": {"blockWithIptables": {"type": "boolean"}, "ip": {"type": "string"}}}, "cmd": {"type": ["array", "string", "null"]}, "defaultUrl": {"type": ["string", "null"]}, "events": {"type": ["boolean", "null"]}, "extraAnnotations": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "extraContainers": {"type": "array"}, "extraLabels": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "extraPodConfig": {"type": "object", "additionalProperties": true}, "extraResource": {"type": "object", "additionalProperties": false, "properties": {"guarantees": {"type": "object", "additionalProperties": true}, "limits": {"type": "object", "additionalProperties": true}}}, "fsGid": {"type": ["integer", "null"]}, "lifecycleHooks": {"type": "object", "additionalProperties": false, "properties": {"postStart": {"type": "object", "additionalProperties": true}, "preStop": {"type": "object", "additionalProperties": true}}}, "networkTools": {"type": "object", "additionalProperties": false, "properties": {"image": {"type": "object", "additionalProperties": false, "required": ["name", "tag"], "properties": {"name": {"type": "string"}, "tag": {"type": "string"}, "pullPolicy": {"enum": [null, "", "IfNotPresent", "Always", "Never"]}, "pullSecrets": {"type": "array"}}}, "resources": {"type": "object", "additionalProperties": true}}}, "serviceAccountName": {"type": ["string", "null"]}, "startTimeout": {"type": ["integer", "null"]}, "storage": {"type": "object", "additionalProperties": false, "required": ["type", "homeMountPath"], "properties": {"capacity": {"type": ["string", "null"]}, "dynamic": {"type": "object", "additionalProperties": false, "properties": {"pvcNameTemplate": {"type": ["string", "null"]}, 
"storageAccessModes": {"type": "array", "items": {"type": ["string", "null"]}}, "storageClass": {"type": ["string", "null"]}, "volumeNameTemplate": {"type": ["string", "null"]}}}, "extraLabels": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "extraVolumeMounts": {"type": "array"}, "extraVolumes": {"type": "array"}, "homeMountPath": {"type": "string"}, "static": {"type": "object", "additionalProperties": false, "properties": {"pvcName": {"type": ["string", "null"]}, "subPath": {"type": ["string", "null"]}}}, "type": {"enum": ["dynamic", "static", "none"]}}}, "allowPrivilegeEscalation": {"type": ["boolean", "null"]}, "uid": {"type": ["integer", "null"]}}}, "scheduling": {"type": "object", "additionalProperties": false, "properties": {"userScheduler": {"type": "object", "additionalProperties": false, "required": ["enabled", "plugins", "pluginConfig", "logLevel"], "properties": {"enabled": {"type": "boolean"}, "revisionHistoryLimit": {"type": ["integer", "null"], "minimum": 0}, "replicas": {"type": "integer"}, "image": {"type": "object", "additionalProperties": false, "required": ["name", "tag"], "properties": {"name": {"type": "string"}, "tag": {"type": "string"}, "pullPolicy": {"enum": [null, "", "IfNotPresent", "Always", "Never"]}, "pullSecrets": {"type": "array"}}}, "pdb": {"type": "object", "additionalProperties": false, "properties": {"enabled": {"type": "boolean"}, "maxUnavailable": {"type": ["integer", "null"]}, "minAvailable": {"type": ["integer", "null"]}}}, "nodeSelector": {"type": "object", "additionalProperties": true}, "tolerations": {"type": "array"}, "labels": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "annotations": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "containerSecurityContext": {"type": "object", "additionalProperties": true}, "logLevel": {"type": "integer"}, "plugins": {"type": "object", "additionalProperties": true}, "pluginConfig": {"type": "array"}, "resources": {"type": "object", "additionalProperties": true}, "serviceAccount": {"type": "object", "required": ["create"], "additionalProperties": false, "properties": {"create": {"type": "boolean"}, "name": {"type": ["string", "null"]}, "annotations": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}}}, "extraPodSpec": {"type": "object", "additionalProperties": true}}}, "podPriority": {"type": "object", "additionalProperties": false, "properties": {"enabled": {"type": "boolean"}, "globalDefault": {"type": "boolean"}, "defaultPriority": {"type": "integer"}, "imagePullerPriority": {"type": "integer"}, "userPlaceholderPriority": {"type": "integer"}}}, "userPlaceholder": {"type": "object", "additionalProperties": false, "properties": {"enabled": {"type": "boolean"}, "image": {"type": "object", "additionalProperties": false, "required": ["name", "tag"], "properties": {"name": {"type": "string"}, "tag": {"type": "string"}, "pullPolicy": {"enum": [null, "", "IfNotPresent", "Always", "Never"]}, "pullSecrets": {"type": "array"}}}, "revisionHistoryLimit": {"type": ["integer", "null"], "minimum": 0}, "replicas": {"type": "integer"}, "labels": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "annotations": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "resources": {"type": "object", "additionalProperties": true}, 
"containerSecurityContext": {"type": "object", "additionalProperties": true}}}, "corePods": {"type": "object", "additionalProperties": false, "properties": {"tolerations": {"type": "array"}, "nodeAffinity": {"type": "object", "additionalProperties": false, "properties": {"matchNodePurpose": {"enum": ["ignore", "prefer", "require"]}}}}}, "userPods": {"type": "object", "additionalProperties": false, "properties": {"tolerations": {"type": "array"}, "nodeAffinity": {"type": "object", "additionalProperties": false, "properties": {"matchNodePurpose": {"enum": ["ignore", "prefer", "require"]}}}}}}}, "ingress": {"type": "object", "additionalProperties": false, "required": ["enabled"], "properties": {"enabled": {"type": "boolean"}, "annotations": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "ingressClassName": {"type": ["string", "null"]}, "hosts": {"type": "array"}, "pathSuffix": {"type": ["string", "null"]}, "pathType": {"enum": ["Prefix", "Exact", "ImplementationSpecific"]}, "tls": {"type": "array"}}}, "prePuller": {"type": "object", "additionalProperties": false, "required": ["hook", "continuous"], "properties": {"revisionHistoryLimit": {"type": ["integer", "null"], "minimum": 0}, "labels": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "annotations": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}, "resources": {"type": "object", "additionalProperties": true}, "extraTolerations": {"type": "array"}, "hook": {"type": "object", "additionalProperties": false, "required": ["enabled"], "properties": {"enabled": {"type": "boolean"}, "pullOnlyOnChanges": {"type": "boolean"}, "podSchedulingWaitDuration": {"type": "integer"}, "nodeSelector": {"type": "object", "additionalProperties": true}, "tolerations": {"type": "array"}, "containerSecurityContext": {"type": "object", "additionalProperties": true}, "image": {"type": "object", "additionalProperties": false, "required": ["name", "tag"], "properties": {"name": {"type": "string"}, "tag": {"type": "string"}, "pullPolicy": {"enum": [null, "", "IfNotPresent", "Always", "Never"]}, "pullSecrets": {"type": "array"}}}, "resources": {"type": "object", "additionalProperties": true}, "serviceAccount": {"type": "object", "required": ["create"], "additionalProperties": false, "properties": {"create": {"type": "boolean"}, "name": {"type": ["string", "null"]}, "annotations": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "string"}}}}}}}, "continuous": {"type": "object", "additionalProperties": false, "required": ["enabled"], "properties": {"enabled": {"type": "boolean"}}}, "pullProfileListImages": {"type": "boolean"}, "extraImages": {"type": "object", "additionalProperties": false, "patternProperties": {".*": {"type": "object", "additionalProperties": false, "required": ["name", "tag"], "properties": {"name": {"type": "string"}, "tag": {"type": "string"}}}}}, "containerSecurityContext": {"type": "object", "additionalProperties": true}, "pause": {"type": "object", "additionalProperties": false, "properties": {"containerSecurityContext": {"type": "object", "additionalProperties": true}, "image": {"type": "object", "additionalProperties": false, "required": ["name", "tag"], "properties": {"name": {"type": "string"}, "tag": {"type": "string"}, "pullPolicy": {"enum": [null, "", "IfNotPresent", "Always", "Never"]}, "pullSecrets": {"type": "array"}}}}}}}, "custom": {"type": 
"object", "additionalProperties": true}, "cull": {"type": "object", "additionalProperties": false, "required": ["enabled"], "properties": {"enabled": {"type": "boolean"}, "users": {"type": ["boolean", "null"]}, "adminUsers": {"type": ["boolean", "null"]}, "removeNamedServers": {"type": ["boolean", "null"]}, "timeout": {"type": ["integer", "null"]}, "every": {"type": ["integer", "null"]}, "concurrency": {"type": ["integer", "null"]}, "maxAge": {"type": ["integer", "null"]}}}, "debug": {"type": "object", "additionalProperties": false, "required": ["enabled"], "properties": {"enabled": {"type": "boolean"}}}, "rbac": {"type": "object", "additionalProperties": false, "required": ["create"], "properties": {"enabled": {"type": "boolean"}, "create": {"type": "boolean"}}}, "global": {"type": "object", "additionalProperties": true, "properties": {"safeToShowValues": {"type": "boolean"}}}}}
diff --git a/sk-k8s/cluster-role-binding.yaml b/sk-k8s/cluster-role-binding.yaml
new file mode 100644
index 0000000..e929f32
--- /dev/null
+++ b/sk-k8s/cluster-role-binding.yaml
@@ -0,0 +1,15 @@
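+# Grants the JupyterHub hub service account cluster-admin rights so the
+# spawner can create per-user namespaces, roles, and secrets. This is a
+# development convenience; production setups should bind a narrower role.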
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: jupyter-rbac
+subjects:
+  - kind: ServiceAccount
+    name: hub
+    namespace: jupyter
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
diff --git a/sk-k8s/job.yaml b/sk-k8s/job.yaml
new file mode 100644
index 0000000..c122f07
--- /dev/null
+++ b/sk-k8s/job.yaml
@@ -0,0 +1,28 @@
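+# One-shot Job that runs init.sh (mounted from the init-script-configmap
+# ConfigMap) to seed JupyterHub with demo groups via its REST API.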
+apiVersion: batch/v1
+kind: Job
+
+metadata:
+  name: hub-content-init
+  namespace: jupyter
+spec:
+  template:
+    spec:
+      containers:
+        - name: hub-content-init
+          image: ubuntu:22.04
+          command: ["/bin/bash", "-c"]
+          args: ["/init/init.sh"]
+          volumeMounts:
+            - name: init-script-volume
+              mountPath: /init/init.sh
+              readOnly: true
+              subPath: init.sh
+      restartPolicy: Never
+      volumes:
+        - name: init-script-volume
+          configMap:
+            defaultMode: 0700
+            name: init-script-configmap
+  backoffLimit: 4
diff --git a/sk-k8s/script.yaml b/sk-k8s/script.yaml
new file mode 100644
index 0000000..49bed58
--- /dev/null
+++ b/sk-k8s/script.yaml
@@ -0,0 +1,25 @@
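+# Init script executed by the hub-content-init Job: it requests an API token
+# for the jovyan user, then creates groups group-a, group-b and group-c and
+# adds jovyan to each of them.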
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: init-script-configmap
+  namespace: jupyter
+data:
+  init.sh: |
+    #!/bin/bash
+    apt update
+    apt install -y jq curl
+
+    token=$(curl -X POST -d '{"auth": {"username": "jovyan", "token": "12345"}}' http://hub.jupyter.svc.cluster.local:8081/hub/api/users/jovyan/tokens | jq -r '.token')
+
+    curl --header "Authorization: Bearer $token" http://hub.jupyter.svc.cluster.local:8081/hub/api/groups
+
+    # create groups
+    for group in group-a group-b group-c
+    do
+      curl --request POST --location http://hub.jupyter.svc.cluster.local:8081/hub/api/groups/${group} --header "Authorization: Bearer $token" --header 'Content-Type: application/json'
+      # add user to group
+      curl --request POST --location http://hub.jupyter.svc.cluster.local:8081/hub/api/groups/${group}/users --header "Authorization: Bearer $token" --header 'Content-Type: application/json' --data '{"users": ["jovyan"]}'
+    done
diff --git a/skaffold.yaml b/skaffold.yaml
new file mode 100644
index 0000000..7cca29e
--- /dev/null
+++ b/skaffold.yaml
@@ -0,0 +1,36 @@
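+# Skaffold pipeline: builds the hub image and, under the "reference" profile,
+# deploys the JupyterHub chart together with the sk-k8s seed manifests.
+# Run with e.g. `skaffold dev -p reference`.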
+apiVersion: skaffold/v4beta9
+kind: Config
+build:
+  artifacts:
+    - image: hubimage
+
+profiles:
+  - name: reference
+    deploy:
+      helm:
+        releases:
+          - name: jupyterhub
+            chartPath: jupyterhub
+            namespace: jupyter
+            createNamespace: true
+            valuesFiles:
+              - jupyterhub/values-minikube.yaml
+            setValueTemplates:
+              hub.image.name: "{{.IMAGE_REPO_hubimage}}"
+              hub.image.tag: "{{.IMAGE_TAG_hubimage}}@{{.IMAGE_DIGEST_hubimage}}"
+    manifests:
+      rawYaml:
+        - sk-k8s/cluster-role-binding.yaml
+        - sk-k8s/script.yaml
+        - sk-k8s/job.yaml
+
+
+portForward:
+  - resourceType: service
+    resourceName: proxy-public
+    namespace: jupyter
+    port: 80          # service port of proxy-public
+    localPort: 8000   # forwarded to localhost:8000
diff --git a/validate_config.py b/validate_config.py
deleted file mode 100644
index f320633..0000000
--- a/validate_config.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from application_hub_context.parser import ConfigParser
-
-ws_config_parser = ConfigParser.read_file(
- config_path="jupyterhub/files/hub/config.yml", user_groups=["group-2"]
-)
-
-print(ws_config_parser.get_profile_by_slug(slug="ellip_studio_labs").dict())
-
-print(ws_config_parser.get_profile_config_maps(profile_id="profile_studio_labs"))