# fullnameOverride and nameOverride distinguish between blank strings, null
# values, and non-blank strings. For more details, see the configuration reference.
fullnameOverride: ""
nameOverride:
# custom can contain anything you want to pass to the hub pod, as all passed
# Helm template values will be made available there.
custom: {}
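# An illustrative (non-default) use of custom: values placed here can be read
# back from hub.extraConfig snippets, e.g. via the z2jh.get_config helper
# shipped with the default hub image (an assumption if a custom hub image is
# used):
#
# custom:
#   currentSemester: "2024-spring"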
# imagePullSecret is configuration to create a k8s Secret that the Helm
# chart's pods can get credentials from to pull their images.
imagePullSecret:
create: false
automaticReferenceInjection: true
registry:
username:
password:
email:
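# A minimal sketch (placeholder registry and credentials, not defaults) of
# letting the chart create and inject a pull secret for a private registry:
#
# imagePullSecret:
#   create: true
#   automaticReferenceInjection: true
#   registry: registry.example.com
#   username: my-user
#   password: my-token
#   email: owner@example.com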
# imagePullSecrets is configuration to reference the k8s Secret resources the
# Helm chart's pods can get credentials from to pull their images.
imagePullSecrets: []
# hub relates to the hub pod, responsible for running JupyterHub, its
# configured Authenticator class, its configured Spawner class KubeSpawner,
# and its configured Proxy class ConfigurableHTTPProxy. KubeSpawner creates
# the user pods, and ConfigurableHTTPProxy speaks with the actual
# ConfigurableHTTPProxy server in the proxy pod.
hub:
revisionHistoryLimit:
config:
JupyterHub:
admin_access: true
authenticator_class: dummy
service:
type: ClusterIP
annotations: {}
ports:
nodePort:
extraPorts: []
loadBalancerIP:
baseUrl: /
cookieSecret:
initContainers: []
nodeSelector: {}
tolerations: []
concurrentSpawnLimit: 64
consecutiveFailureLimit: 5
activeServerLimit:
deploymentStrategy:
## type: Recreate
## - sqlite-pvc backed hubs require the Recreate deployment strategy, as a
## typical PVC storage can only be bound to one pod at a time.
## - JupyterHub isn't designed to support being run in parallel. More work
## needs to be done in JupyterHub itself before a fully highly available (HA)
## deployment of JupyterHub on k8s is possible.
type: Recreate
db:
type: sqlite-pvc
upgrade:
pvc:
annotations: {}
selector: {}
accessModes:
- ReadWriteOnce
storage: 1Gi
subPath:
storageClassName:
url:
password:
labels: {}
annotations: {}
command: []
args: []
extraConfig: {}
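# extraConfig takes named snippets of Python code that are appended to the
# hub's jupyterhub_config.py. A minimal sketch (the key name and setting are
# illustrative, not defaults):
#
# extraConfig:
#   myConfig: |
#     c.JupyterHub.admin_access = False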
extraFiles: {}
extraEnv: {}
extraContainers: []
extraVolumes: []
extraVolumeMounts: []
image:
name: jupyterhub/k8s-hub
tag: "set-by-chartpress"
pullPolicy:
pullSecrets: []
resources: {}
podSecurityContext:
fsGroup: 1000
containerSecurityContext:
runAsUser: 1000
runAsGroup: 1000
allowPrivilegeEscalation: false
lifecycle: {}
loadRoles: {}
services: {}
pdb:
enabled: false
maxUnavailable:
minAvailable: 1
networkPolicy:
enabled: true
ingress: []
egress: []
egressAllowRules:
cloudMetadataServer: true
dnsPortsPrivateIPs: true
nonPrivateIPs: true
privateIPs: true
interNamespaceAccessLabels: ignore
allowedIngressPorts: []
allowNamedServers: false
namedServerLimitPerUser:
authenticatePrometheus:
redirectToServer:
shutdownOnLogout:
templatePaths: []
templateVars: {}
livenessProbe:
# The livenessProbe's aim is to give JupyterHub sufficient time to start up,
# but to restart it if it becomes unresponsive for ~5 min.
enabled: true
initialDelaySeconds: 300
periodSeconds: 10
failureThreshold: 30
timeoutSeconds: 3
readinessProbe:
# The readinessProbe's aim is to provide a successful startup indication,
# but following that never to become unready before the livenessProbe fails
# and restarts it if needed. Becoming unready after startup serves no
# purpose, as there is no other pod to fall back to in our non-HA deployment.
enabled: true
initialDelaySeconds: 0
periodSeconds: 2
failureThreshold: 1000
timeoutSeconds: 1
existingSecret:
serviceAccount:
create: true
name:
annotations: {}
extraPodSpec: {}
rbac:
create: true
# proxy relates to the proxy pod, the proxy-public service, and the autohttps
# pod and proxy-http service.
proxy:
secretToken:
annotations: {}
deploymentStrategy:
## type: Recreate
## - JupyterHub's interaction with the CHP proxy becomes a lot more robust
## with this configuration. To understand this, consider that JupyterHub
## during startup interacts a lot with the k8s service to reach a ready
## proxy pod. If the hub pod restarts during a helm upgrade while the proxy
## pod is going through a rolling update, the hub pod could start a sequence
## of interactions against the old proxy pod and finish it against the new
## one. As CHP proxy pods carry individual state, this is very error prone.
## One observed outcome when not using the Recreate strategy is that user
## pods were deleted by the hub pod because it considered them unreachable:
## it had only configured the old proxy pod, not the new one, before trying
## to reach them.
type: Recreate
## rollingUpdate:
## - WARNING:
## This is required to be set explicitly blank! Without it being explicitly
## blank, k8s will let any old values under rollingUpdate remain, the
## Deployment becomes invalid, and a helm upgrade would fail with an error
## like this:
##
## UPGRADE FAILED
## Error: Deployment.apps "proxy" is invalid: spec.strategy.rollingUpdate: Forbidden: may not be specified when strategy `type` is 'Recreate'
## Error: UPGRADE FAILED: Deployment.apps "proxy" is invalid: spec.strategy.rollingUpdate: Forbidden: may not be specified when strategy `type` is 'Recreate'
rollingUpdate:
# service relates to the proxy-public service
service:
type: LoadBalancer
labels: {}
annotations: {}
nodePorts:
http:
https:
disableHttpPort: false
extraPorts: []
loadBalancerIP:
loadBalancerSourceRanges: []
# chp relates to the proxy pod, which is responsible for routing traffic based
# on dynamic configuration sent from JupyterHub to CHP's REST API.
chp:
revisionHistoryLimit:
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
name: jupyterhub/configurable-http-proxy
# tag is automatically bumped to new patch versions by the
# watch-dependencies.yaml workflow.
#
tag: "4.5.3" # https://github.com/jupyterhub/configurable-http-proxy/tags
pullPolicy:
pullSecrets: []
extraCommandLineFlags: []
livenessProbe:
enabled: true
initialDelaySeconds: 60
periodSeconds: 10
failureThreshold: 30
timeoutSeconds: 3
readinessProbe:
enabled: true
initialDelaySeconds: 0
periodSeconds: 2
failureThreshold: 1000
timeoutSeconds: 1
resources: {}
defaultTarget:
errorTarget:
extraEnv: {}
nodeSelector: {}
tolerations: []
networkPolicy:
enabled: true
ingress: []
egress: []
egressAllowRules:
cloudMetadataServer: true
dnsPortsPrivateIPs: true
nonPrivateIPs: true
privateIPs: true
interNamespaceAccessLabels: ignore
allowedIngressPorts: [http, https]
pdb:
enabled: false
maxUnavailable:
minAvailable: 1
extraPodSpec: {}
# traefik relates to the autohttps pod, which is responsible for TLS
# termination when proxy.https.type=letsencrypt.
traefik:
revisionHistoryLimit:
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
name: traefik
# tag is automatically bumped to new patch versions by the
# watch-dependencies.yaml workflow.
#
tag: "v2.9.1" # ref: https://hub.docker.com/_/traefik?tab=tags
pullPolicy:
pullSecrets: []
hsts:
includeSubdomains: false
preload: false
maxAge: 15724800 # About 6 months
resources: {}
labels: {}
extraInitContainers: []
extraEnv: {}
extraVolumes: []
extraVolumeMounts: []
extraStaticConfig: {}
extraDynamicConfig: {}
nodeSelector: {}
tolerations: []
extraPorts: []
networkPolicy:
enabled: true
ingress: []
egress: []
egressAllowRules:
cloudMetadataServer: true
dnsPortsPrivateIPs: true
nonPrivateIPs: true
privateIPs: true
interNamespaceAccessLabels: ignore
allowedIngressPorts: [http, https]
pdb:
enabled: false
maxUnavailable:
minAvailable: 1
serviceAccount:
create: true
name:
annotations: {}
extraPodSpec: {}
secretSync:
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
name: jupyterhub/k8s-secret-sync
tag: "set-by-chartpress"
pullPolicy:
pullSecrets: []
resources: {}
labels: {}
https:
enabled: false
type: letsencrypt
# type can be one of: letsencrypt, manual, offload, secret
letsencrypt:
contactEmail:
# Specify a custom ACME server here, e.g.
# https://acme-staging-v02.api.letsencrypt.org/directory to use Let's
# Encrypt's staging environment.
acmeServer: https://acme-v02.api.letsencrypt.org/directory
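# A minimal sketch (hostname and email are placeholders) of enabling
# automatic HTTPS against the Let's Encrypt staging environment:
#
# https:
#   enabled: true
#   hosts:
#     - hub.example.com
#   letsencrypt:
#     contactEmail: admin@example.com
#     acmeServer: https://acme-staging-v02.api.letsencrypt.org/directory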
manual:
key:
cert:
secret:
name:
key: tls.key
crt: tls.crt
hosts: []
# singleuser relates to the configuration of KubeSpawner which runs in the hub
# pod, and its spawning of user pods such as jupyter-myusername.
singleuser:
podNameTemplate:
extraTolerations: []
nodeSelector: {}
extraNodeAffinity:
required: []
preferred: []
extraPodAffinity:
required: []
preferred: []
extraPodAntiAffinity:
required: []
preferred: []
networkTools:
image:
name: jupyterhub/k8s-network-tools
tag: "set-by-chartpress"
pullPolicy:
pullSecrets: []
resources: {}
cloudMetadata:
# blockWithIptables set to true will append a privileged initContainer that
# uses iptables to block the sensitive metadata server at the provided ip.
blockWithIptables: true
ip: 169.254.169.254
networkPolicy:
enabled: true
ingress: []
egress: []
egressAllowRules:
cloudMetadataServer: false
dnsPortsPrivateIPs: true
nonPrivateIPs: true
privateIPs: false
interNamespaceAccessLabels: ignore
allowedIngressPorts: []
events: true
extraAnnotations: {}
extraLabels:
hub.jupyter.org/network-access-hub: "true"
extraFiles: {}
extraEnv: {}
lifecycleHooks: {}
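# lifecycleHooks follow the k8s container lifecycle format. An illustrative
# postStart hook (the copied path is a placeholder, not a chart default):
#
# lifecycleHooks:
#   postStart:
#     exec:
#       command:
#         - "sh"
#         - "-c"
#         - "cp -r /srv/shared-examples /home/jovyan/examples || true"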
initContainers: []
extraContainers: []
allowPrivilegeEscalation: false
uid: 1000
fsGid: 100
serviceAccountName:
storage:
type: dynamic
extraLabels: {}
extraVolumes: []
extraVolumeMounts: []
static:
pvcName:
subPath: "{username}"
capacity: 10Gi
homeMountPath: /home/jovyan
dynamic:
storageClass:
pvcNameTemplate: claim-{username}{servername}
volumeNameTemplate: volume-{username}{servername}
storageAccessModes: [ReadWriteOnce]
image:
name: jupyterhub/k8s-singleuser-sample
tag: "set-by-chartpress"
pullPolicy:
pullSecrets: []
startTimeout: 300
cpu:
limit:
guarantee:
memory:
limit:
guarantee: 1G
extraResource:
limits: {}
guarantees: {}
cmd: jupyterhub-singleuser
defaultUrl:
extraPodConfig: {}
profileList: []
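# profileList lets users choose between server configurations on the spawn
# page. A minimal sketch (names, image, and limits are illustrative):
#
# profileList:
#   - display_name: "Minimal environment"
#     description: "Default image, 1G RAM guarantee"
#     default: true
#   - display_name: "Datascience environment"
#     description: "Larger image with more memory"
#     kubespawner_override:
#       image: jupyter/datascience-notebook:latest
#       mem_limit: 4G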
# scheduling relates to the user-scheduler pods and user-placeholder pods.
scheduling:
userScheduler:
enabled: true
revisionHistoryLimit:
replicas: 2
logLevel: 4
# plugins are configured on the user-scheduler to influence how user pods are
# scored, so that they are packed onto the busiest node. By doing this, we
# help scale down more effectively. It isn't obvious how to enable/disable
# scoring plugins, and how to configure them, to accomplish this.
#
# plugins ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins-1
# migration ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduler-configuration-migrations
#
plugins:
score:
# These scoring plugins are enabled by default as of 2022-02-22, according to
# https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins
#
# Enabled with high priority:
# - NodeAffinity
# - InterPodAffinity
# - NodeResourcesFit
# - ImageLocality
# Remains enabled with low default priority:
# - TaintToleration
# - PodTopologySpread
# - VolumeBinding
# Disabled for scoring:
# - NodeResourcesBalancedAllocation
#
disabled:
# We disable this plugin (with regards to scoring) so it doesn't interfere
# with or complicate our use of NodeResourcesFit.
- name: NodeResourcesBalancedAllocation
# The following plugins are disabled so they can be re-enabled below with
# different weights without causing an error.
- name: NodeAffinity
- name: InterPodAffinity
- name: NodeResourcesFit
- name: ImageLocality
enabled:
- name: NodeAffinity
weight: 14631
- name: InterPodAffinity
weight: 1331
- name: NodeResourcesFit
weight: 121
- name: ImageLocality
weight: 11
pluginConfig:
# Here we declare that we should optimize pods to fit based on a
# MostAllocated strategy instead of the default LeastAllocated.
- name: NodeResourcesFit
args:
scoringStrategy:
resources:
- name: cpu
weight: 1
- name: memory
weight: 1
type: MostAllocated
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
# IMPORTANT: Bumping the minor version of this binary should go hand in
# hand with an inspection of the user-scheduler's RBAC resources
# that we have forked in
# templates/scheduling/user-scheduler/rbac.yaml.
#
# Debugging advice:
#
# - Is configuration of kube-scheduler broken in
# templates/scheduling/user-scheduler/configmap.yaml?
#
# - Is the kube-scheduler binary incompatible with a k8s api-server
# that is too new or too old?
#
# - You can update the GitHub workflow that runs tests to
# include "deploy/user-scheduler" in the k8s namespace report
# and reduce the user-scheduler deployments replicas to 1 in
# dev-config.yaml to get relevant logs from the user-scheduler
# pods. Inspect the "Kubernetes namespace report" action!
#
# - A typical failure is that kube-scheduler fails to look up
# resources via its "informers" and won't start trying to
# schedule pods until those lookups succeed; this may require
# additional RBAC permissions, or that the k8s api-server is
# aware of the resources.
#
# - If "successfully acquired lease" can be seen in the logs, it
# is a good sign kube-scheduler is ready to schedule pods.
#
name: k8s.gcr.io/kube-scheduler
# tag is automatically bumped to new patch versions by the
# watch-dependencies.yaml workflow. The minor version is pinned in the
# workflow, and should be updated there if a minor version bump is done
# here.
#
tag: "v1.23.12" # ref: https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md
pullPolicy:
pullSecrets: []
nodeSelector: {}
tolerations: []
labels: {}
annotations: {}
pdb:
enabled: true
maxUnavailable: 1
minAvailable:
resources: {}
serviceAccount:
create: true
name:
annotations: {}
extraPodSpec: {}
podPriority:
enabled: false
globalDefault: false
defaultPriority: 0
imagePullerPriority: -5
userPlaceholderPriority: -10
userPlaceholder:
enabled: true
image:
name: k8s.gcr.io/pause
# tag is automatically bumped to new patch versions by the
# watch-dependencies.yaml workflow.
#
# If you update this, also update prePuller.pause.image.tag
#
tag: "3.8"
pullPolicy:
pullSecrets: []
revisionHistoryLimit:
replicas: 0
labels: {}
annotations: {}
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
resources: {}
corePods:
tolerations:
- key: hub.jupyter.org/dedicated
operator: Equal
value: core
effect: NoSchedule
- key: hub.jupyter.org_dedicated
operator: Equal
value: core
effect: NoSchedule
nodeAffinity:
matchNodePurpose: prefer
userPods:
tolerations:
- key: hub.jupyter.org/dedicated
operator: Equal
value: user
effect: NoSchedule
- key: hub.jupyter.org_dedicated
operator: Equal
value: user
effect: NoSchedule
nodeAffinity:
matchNodePurpose: prefer
# prePuller relates to the hook-image-puller and continuous-image-puller DaemonSets
prePuller:
revisionHistoryLimit:
labels: {}
annotations: {}
resources: {}
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
extraTolerations: []
# hook relates to the hook-image-awaiter Job and hook-image-puller DaemonSet
hook:
enabled: true
pullOnlyOnChanges: true
# image and the configuration below relate to the hook-image-awaiter Job
image:
name: jupyterhub/k8s-image-awaiter
tag: "set-by-chartpress"
pullPolicy:
pullSecrets: []
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
podSchedulingWaitDuration: 10
nodeSelector: {}
tolerations: []
resources: {}
serviceAccount:
create: true
name:
annotations: {}
continuous:
enabled: true
pullProfileListImages: true
extraImages: {}
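# extraImages lets the image pullers pre-pull additional images. An
# illustrative entry (the key, name, and tag are placeholders):
#
# extraImages:
#   myOtherImage:
#     name: jupyter/datascience-notebook
#     tag: latest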
pause:
containerSecurityContext:
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
image:
name: k8s.gcr.io/pause
# tag is automatically bumped to new patch versions by the
# watch-dependencies.yaml workflow.
#
# If you update this, also update scheduling.userPlaceholder.image.tag
#
tag: "3.8"
pullPolicy:
pullSecrets: []
ingress:
enabled: false
annotations: {}
ingressClassName:
hosts: []
pathSuffix:
pathType: Prefix
tls: []
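# A minimal sketch (hostnames, class name, and secret name are placeholders)
# of exposing the proxy through an existing ingress controller:
#
# ingress:
#   enabled: true
#   ingressClassName: nginx
#   hosts:
#     - hub.example.com
#   tls:
#     - hosts:
#         - hub.example.com
#       secretName: hub-example-com-tls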
# cull relates to the jupyterhub-idle-culler service, responsible for evicting
# inactive singleuser pods.
#
# The configuration below, except for enabled, corresponds to command-line flags
# for jupyterhub-idle-culler as documented here:
# https://github.com/jupyterhub/jupyterhub-idle-culler#as-a-standalone-script
#
cull:
enabled: true
users: false # --cull-users
adminUsers: true # --cull-admin-users
removeNamedServers: false # --remove-named-servers
timeout: 3600 # --timeout
every: 600 # --cull-every
concurrency: 10 # --concurrency
maxAge: 0 # --max-age
debug:
enabled: false
global:
safeToShowValues: false