-
Notifications
You must be signed in to change notification settings - Fork 12
/
Copy pathcluster.sh
executable file
·337 lines (281 loc) · 7.79 KB
/
cluster.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
#!/usr/bin/env bash
# Bootstrap a local multi-node kind cluster with Cilium (CNI + kube-proxy
# replacement), cert-manager backed by a local root CA, MetalLB, ingress-nginx
# and dnsmasq-based wildcard DNS for *.kind.cluster.
#
# Strict mode: -e abort on error, -u error on unset variables,
# pipefail so a failing pipeline stage fails the whole pipeline.
set -euo pipefail
# CONSTANTS
readonly KIND_NODE_IMAGE=kindest/node:v1.23.3   # pinned kind node image (k8s 1.23.3)
readonly DNSMASQ_DOMAIN=kind.cluster            # wildcard DNS domain served by dnsmasq
readonly DNSMASQ_CONF=kind.k8s.conf             # dnsmasq drop-in file under /etc/dnsmasq.d
# FUNCTIONS
log(){
  # Print a message framed by horizontal rules, as a section banner.
  # $1 - message text.
  local rule="---------------------------------------------------------------------------------------"
  echo "$rule"
  # printf with a quoted argument: the original unquoted `echo $1` collapsed
  # internal whitespace and was subject to glob expansion.
  printf '%s\n' "$1"
  echo "$rule"
}
wait_ready(){
  # Block until resources reach the Ready condition, across all namespaces.
  # $1 - resource kind (default: pods)
  # $2 - timeout passed to kubectl (default: 5m)
  # $3 - selector (default: --all)
  local NAME=${1:-pods}
  local TIMEOUT=${2:-5m}
  local SELECTOR=${3:---all}
  log "WAIT $NAME ($TIMEOUT) ..."
  # Quote every expansion so empty/whitespace values cannot break argv.
  kubectl wait -A --timeout="$TIMEOUT" --for=condition=ready "$NAME" "$SELECTOR"
}
wait_pods_ready(){
  # Wait for all pods to be Ready, ignoring completed (Succeeded) pods.
  # $1 - timeout (default: 5m)
  local TIMEOUT=${1:-5m}
  wait_ready pods "$TIMEOUT" --field-selector=status.phase!=Succeeded
}
wait_nodes_ready(){
  # Wait for all nodes to report Ready.
  # $1 - timeout (default: 5m)
  local TIMEOUT=${1:-5m}
  wait_ready nodes "$TIMEOUT"
}
network(){
  # Create the docker network shared by kind and the registry proxies,
  # unless it already exists.
  # $1 - network name (default: kind)
  local NAME=${1:-kind}
  # Log the actual name (the original banner hardcoded "kind").
  log "NETWORK ($NAME) ..."
  # Quote the command substitution: an unquoted empty result left `[ -z ]`
  # with no operand, and multi-word output would break the test.
  if [ -z "$(docker network ls --filter "name=^${NAME}$" --format '{{ .Name }}')" ]
  then
    docker network create "$NAME"
    echo "Network $NAME created"
  else
    echo "Network $NAME already exists, skipping"
  fi
}
proxy(){
  # Run a registry pull-through cache container on the kind network.
  # $1 - container name; $2 - upstream registry URL
  local NAME=$1
  local TARGET=$2
  # BUG FIX: the existence check previously filtered on the literal name
  # "proxy-gcr" regardless of $NAME, so every proxy except proxy-gcr was
  # re-created (and `docker run` failed) on reruns. Filter on $NAME instead.
  if [ -z "$(docker ps --filter "name=^${NAME}$" --format '{{ .Names }}')" ]
  then
    docker run -d --name "$NAME" --restart=always --net=kind -e "REGISTRY_PROXY_REMOTEURL=$TARGET" registry:2
    echo "Proxy $NAME (-> $TARGET) created"
  else
    echo "Proxy $NAME already exists, skipping"
  fi
}
proxies(){
  # Start one pull-through cache per upstream registry (order preserved).
  log "REGISTRY PROXIES ..."
  local name target
  while read -r name target; do
    proxy "$name" "$target"
  done <<'REGISTRIES'
proxy-docker-hub https://registry-1.docker.io
proxy-quay https://quay.io
proxy-gcr https://gcr.io
proxy-k8s-gcr https://k8s.gcr.io
REGISTRIES
}
get_service_lb_ip(){
  # Print the first LoadBalancer ingress IP of a service.
  # $1 - namespace; $2 - service name
  kubectl get svc -n "$1" "$2" -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
}
get_subnet(){
  # Print the first IPAM subnet (CIDR) of docker network $1.
  docker network inspect -f '{{(index .IPAM.Config 0).Subnet}}' "$1"
}
subnet_to_ip(){
echo $1 | sed "[email protected]/16@$2@"
}
root_ca(){
  # Generate a self-signed root CA key pair under .ssl/ (idempotent:
  # existing certificates are kept).
  log "ROOT CERTIFICATE ..."
  mkdir -p .ssl
  # Guard clause instead of if/else: nothing to do when both files exist.
  if [[ -f ".ssl/root-ca.pem" && -f ".ssl/root-ca-key.pem" ]]; then
    echo "Root certificate already exists, skipping"
    return 0
  fi
  openssl genrsa -out .ssl/root-ca-key.pem 2048
  openssl req -x509 -new -nodes -key .ssl/root-ca-key.pem -days 3650 -sha256 -out .ssl/root-ca.pem -subj "/CN=kube-ca"
  echo "Root certificate created"
}
install_ca(){
  # Copy the generated root CA into the host trust store and refresh it.
  log "INSTALL CERTIFICATE AUTHORITY ..."
  local store_dir=/usr/local/share/ca-certificates/kind.cluster
  sudo mkdir -p "$store_dir"
  sudo cp -f .ssl/root-ca.pem "$store_dir/ca.crt"
  sudo update-ca-certificates
}
cluster(){
  # Create a 3x control-plane / 3x worker kind cluster.
  # - default CNI and kube-proxy are disabled (Cilium replaces both)
  # - API server is configured for OIDC against the in-cluster Keycloak
  # - containerd mirrors point at the local registry pull-through proxies
  # - the generated root CA is mounted into every node
  # $1 - cluster name (default: kind)
  # NOTE(review): YAML indentation below reconstructed — the scraped source
  # had all leading whitespace stripped; verify against kind's v1alpha4 schema.
  local NAME=${1:-kind}
  log "CLUSTER ..."
  # Pre-pull so kind does not time out on a slow first download.
  docker pull $KIND_NODE_IMAGE
  kind create cluster --name $NAME --image $KIND_NODE_IMAGE --config - <<EOF
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
  disableDefaultCNI: true
  kubeProxyMode: none
kubeadmConfigPatches:
  - |-
    kind: ClusterConfiguration
    apiServer:
      extraVolumes:
        - name: opt-ca-certificates
          hostPath: /opt/ca-certificates/root-ca.pem
          mountPath: /opt/ca-certificates/root-ca.pem
          readOnly: true
          pathType: File
      extraArgs:
        oidc-client-id: kube
        oidc-issuer-url: https://keycloak.kind.cluster/auth/realms/master
        oidc-username-claim: email
        oidc-groups-claim: groups
        oidc-ca-file: /opt/ca-certificates/root-ca.pem
    controllerManager:
      extraArgs:
        bind-address: 0.0.0.0
    etcd:
      local:
        extraArgs:
          listen-metrics-urls: http://0.0.0.0:2381
    scheduler:
      extraArgs:
        bind-address: 0.0.0.0
containerdConfigPatches:
  - |-
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
      endpoint = ["http://proxy-docker-hub:5000"]
  - |-
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
      endpoint = ["http://proxy-quay:5000"]
  - |-
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
      endpoint = ["http://proxy-k8s-gcr:5000"]
  - |-
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
      endpoint = ["http://proxy-gcr:5000"]
nodes:
  - role: control-plane
    extraMounts:
      - hostPath: $PWD/.ssl/root-ca.pem
        containerPath: /opt/ca-certificates/root-ca.pem
        readOnly: true
  - role: control-plane
    extraMounts:
      - hostPath: $PWD/.ssl/root-ca.pem
        containerPath: /opt/ca-certificates/root-ca.pem
        readOnly: true
  - role: control-plane
    extraMounts:
      - hostPath: $PWD/.ssl/root-ca.pem
        containerPath: /opt/ca-certificates/root-ca.pem
        readOnly: true
  - role: worker
    extraMounts:
      - hostPath: $PWD/.ssl/root-ca.pem
        containerPath: /opt/ca-certificates/root-ca.pem
        readOnly: true
  - role: worker
    extraMounts:
      - hostPath: $PWD/.ssl/root-ca.pem
        containerPath: /opt/ca-certificates/root-ca.pem
        readOnly: true
  - role: worker
    extraMounts:
      - hostPath: $PWD/.ssl/root-ca.pem
        containerPath: /opt/ca-certificates/root-ca.pem
        readOnly: true
EOF
}
cilium(){
  # Install Cilium as CNI with full kube-proxy replacement, plus Hubble
  # observability (relay + UI behind an nginx ingress with a CA-issued cert).
  # NOTE(review): values indentation reconstructed — the scraped source had
  # leading whitespace stripped; verify against the cilium chart values schema.
  log "CILIUM ..."
  helm upgrade --install --wait --timeout 15m --atomic --namespace kube-system --create-namespace \
    --repo https://helm.cilium.io cilium cilium --values - <<EOF
kubeProxyReplacement: strict
k8sServiceHost: kind-external-load-balancer
k8sServicePort: 6443
hostServices:
  enabled: true
externalIPs:
  enabled: true
nodePort:
  enabled: true
hostPort:
  enabled: true
image:
  pullPolicy: IfNotPresent
ipam:
  mode: kubernetes
hubble:
  enabled: true
  relay:
    enabled: true
  ui:
    enabled: true
    ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: nginx
        cert-manager.io/cluster-issuer: ca-issuer
      hosts:
        - hubble-ui.$DNSMASQ_DOMAIN
      tls:
        - secretName: hubble-ui.$DNSMASQ_DOMAIN
          hosts:
            - hubble-ui.$DNSMASQ_DOMAIN
EOF
}
cert_manager(){
  # Install cert-manager (including its CRDs) into its own namespace.
  log "CERT MANAGER ..."
  local helm_args=(
    upgrade --install --wait --timeout 15m --atomic
    --namespace cert-manager --create-namespace
    --repo https://charts.jetstack.io cert-manager cert-manager
    --values -
  )
  helm "${helm_args[@]}" <<EOF
installCRDs: true
EOF
}
cert_manager_ca_secret(){
kubectl delete secret -n cert-manager root-ca || true
kubectl create secret tls -n cert-manager root-ca --cert=.ssl/root-ca.pem --key=.ssl/root-ca-key.pem
}
cert_manager_ca_issuer(){
  # Register a ClusterIssuer that signs certificates with the root-ca secret.
  kubectl apply -n cert-manager -f - <<EOF
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: ca-issuer
spec:
  ca:
    secretName: root-ca
EOF
}
metallb(){
  # Install MetalLB and hand it the top of the kind docker subnet
  # (x.y.255.200 - x.y.255.250) as a layer-2 address pool.
  log "METALLB ..."
  # Declaration split from assignment: `local X=$(cmd)` would mask a
  # failing command substitution (local's own status is always 0).
  local KIND_SUBNET METALLB_START METALLB_END
  KIND_SUBNET=$(get_subnet kind)
  METALLB_START=$(subnet_to_ip "$KIND_SUBNET" 255.200)
  METALLB_END=$(subnet_to_ip "$KIND_SUBNET" 255.250)
  helm upgrade --install --wait --timeout 15m --atomic --namespace metallb-system --create-namespace \
    --repo https://metallb.github.io/metallb metallb metallb --values - <<EOF
configInline:
  address-pools:
    - name: default
      protocol: layer2
      addresses:
        - $METALLB_START-$METALLB_END
EOF
}
ingress(){
  # Deploy ingress-nginx with the default backend enabled.
  log "INGRESS-NGINX ..."
  local helm_args=(
    upgrade --install --wait --timeout 15m --atomic
    --namespace ingress-nginx --create-namespace
    --repo https://kubernetes.github.io/ingress-nginx
    ingress-nginx ingress-nginx --values -
  )
  helm "${helm_args[@]}" <<EOF
defaultBackend:
  enabled: true
EOF
}
dnsmasq(){
  # Point *.$DNSMASQ_DOMAIN at the ingress controller's LoadBalancer IP
  # via a dnsmasq drop-in file.
  log "DNSMASQ ..."
  # Declaration split from assignment so a kubectl failure is not masked.
  local INGRESS_LB_IP
  INGRESS_LB_IP=$(get_service_lb_ip ingress-nginx ingress-nginx-controller)
  echo "address=/$DNSMASQ_DOMAIN/$INGRESS_LB_IP" | sudo tee "/etc/dnsmasq.d/$DNSMASQ_CONF"
}
restart_service(){
  # Restart a systemd unit.
  # $1 - service name (quoted: unquoted it could word-split or glob).
  log "RESTART $1 ..."
  sudo systemctl restart "$1"
}
cleanup(){
  # Tear down artifacts of any previous run: the kind cluster, the dnsmasq
  # drop-in and the root CA copied into the host trust store.
  log "CLEANUP ..."
  kind delete cluster || true   # best-effort: the cluster may not exist yet
  sudo rm -f "/etc/dnsmasq.d/${DNSMASQ_CONF}"
  sudo rm -rf /usr/local/share/ca-certificates/kind.cluster
}
# RUN
# Ordering matters: the network and proxies must exist before the cluster,
# the CA before the cluster (it is mounted into nodes) and before
# cert-manager's secret; MetalLB must assign the ingress LB IP before dnsmasq
# can record it.
cleanup                   # remove artifacts of any previous run
network                   # docker network shared by kind and the proxies
proxies                   # registry pull-through caches
root_ca                   # self-signed root CA (OIDC + cert-manager)
install_ca                # trust the CA on the host
cluster                   # kind cluster (no default CNI / kube-proxy)
cilium                    # CNI + kube-proxy replacement + hubble
cert_manager
cert_manager_ca_secret
cert_manager_ca_issuer
metallb                   # LoadBalancer IPs carved from the kind subnet
ingress                   # ingress-nginx controller
dnsmasq                   # host DNS entry for *.kind.cluster
restart_service dnsmasq
# DONE
log "CLUSTER READY !"
echo "HUBBLE UI: https://hubble-ui.$DNSMASQ_DOMAIN"