create-gke-cluster.sh (forked from cilium/gke)
#!/bin/bash -e
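# Creates a GKE cluster prepared for running Cilium: every node gets a systemd
# unit mounting the BPF filesystem, the kubelet is switched to the CNI network
# plugin, and Cilium is deployed from cilium-deployment.yaml. Expects
# sys-fs-bpf.mount, 04-cilium-cni.conf and cilium-deployment.yaml in the
# current working directory.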
if [ -z "$ADMIN_USER" ]; then
echo "ADMIN_USER is not set"
exit 1
fi
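# Cluster settings; each can be overridden via the environment.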
CLUSTER_NAME="${CLUSTER_NAME:-default-cluster}"
GKE_PROJECT="${GKE_PROJECT:-default-project}"
GKE_REGION="${GKE_REGION:-europe-north1}"
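# GKE_ZONE is a zone suffix appended to GKE_REGION (e.g. "-a" -> europe-north1-a).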
GKE_ZONE="${GKE_ZONE:--a}"
IMAGE_TYPE="${IMAGE_TYPE:-COS}"
NUM_NODES="${NUM_NODES:-3}"
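# Default to the newest 1.11 release available in the target zone unless GKE_VERSION is set.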
default_version=$(gcloud container get-server-config --project "$GKE_PROJECT" --zone "${GKE_REGION}${GKE_ZONE}" | grep 1.11 | head -n 1 | awk '{print $2}')
GKE_VERSION=${GKE_VERSION:-$default_version}
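# The systemd unit directory differs between the COS and Ubuntu node images.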
case "$IMAGE_TYPE" in
"COS") bpf_mount=/etc/systemd/system/ ;;
"UBUNTU") bpf_mount=/lib/systemd/system/ ;;
esac
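# Create the cluster with cloud logging/monitoring, auto-upgrade and auto-repair disabled.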
gcloud beta container --project "$GKE_PROJECT" clusters create "$CLUSTER_NAME" \
  --zone "${GKE_REGION}${GKE_ZONE}" \
  --username "admin" \
  --cluster-version "$GKE_VERSION" \
  --machine-type "n1-standard-1" \
  --image-type "$IMAGE_TYPE" \
  --disk-type "pd-standard" \
  --disk-size "100" \
  --scopes "https://www.googleapis.com/auth/devstorage.read_only","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/monitoring","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append" \
  --num-nodes "$NUM_NODES" \
  --no-enable-cloud-logging \
  --no-enable-cloud-monitoring \
  --network "projects/$GKE_PROJECT/global/networks/default" \
  --subnetwork "projects/$GKE_PROJECT/regions/$GKE_REGION/subnetworks/default" \
  --addons HorizontalPodAutoscaling,HttpLoadBalancing \
  --no-enable-autoupgrade \
  --no-enable-autorepair
gcloud container clusters get-credentials "$CLUSTER_NAME" --zone "${GKE_REGION}${GKE_ZONE}" --project "$GKE_PROJECT"
echo "Waiting for Kubernetes cluster to become ready..."
until kubectl get pods; do sleep 1; done
echo "Enabling CNI configuration..."
INSTANCES=$(gcloud compute instances list --project "$GKE_PROJECT" | grep "$CLUSTER_NAME" | awk '{print $1}')
for INSTANCE in $INSTANCES; do
  FLAGS="--zone $GKE_REGION$GKE_ZONE --project $GKE_PROJECT"
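  # Install and enable a systemd unit that mounts the BPF filesystem at /sys/fs/bpf.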
  gcloud compute scp sys-fs-bpf.mount ${INSTANCE}:/tmp/sys-fs-bpf.mount $FLAGS
  gcloud compute ssh $INSTANCE $FLAGS -- sudo mv /tmp/sys-fs-bpf.mount $bpf_mount
  gcloud compute ssh $INSTANCE $FLAGS -- sudo systemctl enable sys-fs-bpf.mount
  gcloud compute ssh $INSTANCE $FLAGS -- sudo systemctl start sys-fs-bpf.mount
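  # Switch the kubelet from kubenet to the CNI plugin, with CNI binaries in /home/kubernetes/bin.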
  gcloud compute ssh $INSTANCE $FLAGS -- sudo sed -i "s:--network-plugin=kubenet:--network-plugin=cni\ --cni-bin-dir=/home/kubernetes/bin:g" /etc/default/kubelet
  gcloud compute ssh $INSTANCE $FLAGS -- sudo systemctl restart kubelet
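  # Install the Cilium CNI configuration.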
  gcloud compute scp 04-cilium-cni.conf ${INSTANCE}:/tmp/04-cilium-cni.conf $FLAGS
  gcloud compute ssh $INSTANCE $FLAGS -- sudo mkdir -p /etc/cni/net.d/
  gcloud compute ssh $INSTANCE $FLAGS -- sudo cp /tmp/04-cilium-cni.conf /etc/cni/net.d/04-cilium-cni.conf
done
echo "Installing Cilium..."
kubectl create ns cilium
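# On GKE the invoking user needs cluster-admin to create Cilium's RBAC objects.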
kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user "$ADMIN_USER"
kubectl create -f cilium-deployment.yaml
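# Recreate the existing kube-system pods so they come back up on the Cilium-managed network.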
echo "Restarting kube-dns-autoscaler..."
kubectl -n kube-system delete pod -l k8s-app=kube-dns-autoscaler
echo "Restarting kube-dns..."
kubectl -n kube-system delete pod -l k8s-app=kube-dns
echo "Restarting l7-default-backend..."
kubectl -n kube-system delete pod -l k8s-app=glbc
echo "Restarting heapster..."
kubectl -n kube-system delete pod -l k8s-app=heapster
echo "Restarting metrics-server..."
kubectl -n kube-system delete pod -l k8s-app=metrics-server
echo "Waiting for cilium to become ready..."
until kubectl wait --for=condition=Ready --selector k8s-app=cilium -n cilium pod; do sleep 1; done