masters.tf
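# Provisions the k3s control-plane (master) nodes as Proxmox VMs via
# proxmox_vm_qemu (the Telmate/proxmox provider, judging by the resource
# type). A shared cluster token is generated with random_password; the
# first master initialises the cluster with `--cluster-init`, and the
# remaining masters join it through the API endpoint (the kube-vip VIP if
# configured, otherwise the first master's address). Optionally drops a
# kube-vip manifest into the k3s auto-deploy directory and installs Cilium
# as the CNI via Helm.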
resource "random_password" "k3s-token" {
length = 64
special = false
override_special = "_%@"
}
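# Derived values: 1 or 3 masters depending on var.ha_control_plane, the last
# octet of the first master's address (an explicit master_start_index, or the
# first usable host of the subnet, skipping the gateway), and the API
# endpoint used by joining nodes (var.api_vip if set, otherwise the first
# master's IP).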
locals {
  helm_flags    = var.cilium_helm_flags != "" ? "--set ${var.cilium_helm_flags}" : ""
  k3ver         = var.k3s_version == "" ? "INSTALL_K3S_CHANNEL=stable" : "INSTALL_K3S_VERSION=${var.k3s_version}"
  cil_vers      = var.cilium_version == "" ? "" : "--version ${var.cilium_version}"
  masters_count = var.ha_control_plane == false ? 1 : 3
  start_ip_master = var.masters.master_start_index != "" ? var.masters.master_start_index : (
    cidrhost(var.masters.subnet, 1) == var.masters.gw ?
    element(split(".", cidrhost(var.masters.subnet, 1)), 3) + 1 :
    element(split(".", cidrhost(var.masters.subnet, 1)), 3)
  )
  local_masters = { for i, v in range(local.masters_count) : v => {
      name          = "${var.cluster_name}-control-node-${i}"
      pool          = var.masters.pool
      cores         = var.masters.cores
      memory        = var.masters.memory
      bridge        = var.masters.bridge
      tag           = var.masters.tag
      tags          = join(" ", concat([for i in var.masters.tags : i], ["cluster-${var.cluster_name}"], ["control-node"]))
      ipconfig0     = "ip=${cidrhost(var.masters.subnet, local.start_ip_master + i)}/${element(split("/", var.masters.subnet), 1)},gw=${var.masters.gw}"
      scsihw        = var.masters.scsihw
      disks         = var.masters.disks
      image         = var.masters.image
      ssh_user      = var.masters.ssh_user
      user_password = var.masters.user_password
      ssh_keys      = var.masters.ssh_keys
    }
  }
  vip = var.api_vip == "" ? "${cidrhost(var.masters.subnet, local.start_ip_master)}" : var.api_vip
}
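# Illustrative only — the shape of `var.masters` implied by the references
# above. Field names come from this file; every value below is hypothetical:
#
# masters = {
#   node               = "pve1"
#   pool               = "k3s"
#   cores              = 2
#   memory             = 4096
#   bridge             = "vmbr0"
#   tag                = -1
#   tags               = ["k3s"]
#   subnet             = "192.168.1.0/24"
#   gw                 = "192.168.1.1"
#   master_start_index = ""
#   scsihw             = "virtio-scsi-pci"
#   image              = "ubuntu-cloud-template"
#   ssh_user           = "ubuntu"
#   user_password      = "changeme"
#   ssh_keys           = "ssh-ed25519 AAAA..."
#   disks              = [{ id = 0, size = "20G", storage = "local-lvm", type = "scsi", ssd = 1, discard = "on" }]
# }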
resource "proxmox_vm_qemu" "master" {
for_each = { for key, value in local.local_masters : value.name => value }
name = each.value.name
desc = "k3s Master node - Terraform managed"
define_connection_info = "true"
clone = each.value.image
agent = "1"
os_type = "cloud-init"
boot = var.vm_boot
tags = each.value.tags
oncreate = "true"
onboot = "true"
sshkeys = each.value.ssh_keys
cipassword = each.value.user_password
searchdomain = var.cloudinit_search_domain
nameserver = var.cloudinit_nameserver
ipconfig0 = each.value.ipconfig0
ssh_user = each.value.ssh_user
target_node = var.masters.node != "" ? var.masters.node : element(random_shuffle.random_node.result, substr(each.key, -1, -1))
pool = each.value.pool
cores = each.value.cores
sockets = var.vm_sockets
cpu = var.vm_cpu_type
memory = each.value.memory
scsihw = each.value.scsihw
vga {
type = "qxl"
memory = 0
}
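  # Disks come from var.masters.disks (one disk block per entry); the NIC is
  # always virtio on the configured bridge/VLAN, with one queue per vCPU.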
dynamic "disk" {
for_each = each.value.disks
content {
slot = disk.value.id
size = disk.value.size
storage = disk.value.storage
type = disk.value.type
ssd = disk.value.ssd
discard = disk.value.discard
}
}
network {
model = "virtio"
bridge = each.value.bridge
tag = each.value.tag
queues = each.value.cores
}
serial {
id = 0
type = "socket"
}
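  # Uploads the k3s server configuration rendered from
  # templates/k3s/server.yaml to /tmp/config.yaml. When Cilium is enabled,
  # flannel, the built-in network policy controller and kube-proxy are
  # disabled so Cilium can replace them. The template itself is not part of
  # this file, but given these variables the rendered result is presumably a
  # standard k3s config.yaml, roughly (illustrative values):
  #
  #   node-ip: 10.0.0.11
  #   tls-san: ["10.0.0.100"]
  #   flannel-backend: "none"
  #   disable-network-policy: true
  #   disable-kube-proxy: true
  #   cluster-cidr: "10.42.0.0/16"
  #   service-cidr: "10.43.0.0/16"
  #   write-kubeconfig-mode: "0644"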
provisioner "file" {
destination = "/tmp/config.yaml"
content = templatefile("${path.module}/templates/k3s/server.yaml", {
node-ip = self.ssh_host
sans = var.k3s_sans == null ? ["${local.vip}"] : distinct(concat(var.k3s_sans, ["${local.vip}"]))
node_taints = var.k3s_master_node_taints
flannel_backend = var.cilium == true ? "none" : var.k3s_flannel_backend
disable = var.k3s_disable
cluster_cidr = var.k3s_cluster_cidr
service_cidr = var.k3s_service_cidr
net_pol_disable = var.cilium == true ? true : var.k3s_network_policy_disable
cloud_contr_disable = var.k3s_cloud_controller_disable
kube_proxy_disable = var.cilium == true ? true : var.k3s_kube_proxy_disable
secrets_encryption_enable = var.k3s_secrets_encryption_enable
write_kube_perm = var.k3s_write_kubeconfig_mode
master_kubelet_args = var.k3s_master_kubelet_args
kube_control_manag_args = var.k3s_kube_control_manag_args
kube_proxy_args = var.k3s_kube_proxy_args
kube_sched_args = var.k3s_kube_sched_args
kube_apiserver_args = var.k3s_kube_apiserver_args
master_node_taints = var.k3s_master_node_taints
master_node_labels = var.k3s_master_node_labels
snapshotter = var.k3s_snapshotter
})
connection {
type = "ssh"
host = self.ssh_host
user = self.ssh_user
private_key = var.private_ssh_key
}
}
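  # Uploads the kube-vip manifest rendered from templates/k3s/kube-vip.yaml.
  # Note it is templated with var.api_vip directly (not local.vip), so it only
  # carries a usable address when an API VIP is configured; the file is
  # discarded below unless var.kube_vip_enable is set.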
provisioner "file" {
destination = "/tmp/kube-vip.yaml"
content = templatefile("${path.module}/templates/k3s/kube-vip.yaml", {
vip = var.api_vip
dev = var.kube_vip_dev
})
connection {
type = "ssh"
host = self.ssh_host
user = self.ssh_user
private_key = var.private_ssh_key
}
}
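  # First-master bootstrap. The random_password token is handed to the k3s
  # installer, the kubeconfig is copied to ~/.kube/config just long enough for
  # Helm to install Cilium and removed again at the end, and the final kubectl
  # pipeline deletes every non-host-network pod so anything started before
  # Cilium was ready gets recreated under Cilium. ($${USER} is Terraform's
  # escape for a literal shell ${USER}.)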
provisioner "remote-exec" {
# Checks if it is running on first master,if true then init's the first master,else does exit without error.
inline = [
"if ip a | grep inet | awk '{print $2}' | grep -w ${cidrhost(var.masters.subnet, local.start_ip_master)};then : ;else exit 0;fi",
"sudo mkdir -p /etc/rancher/k3s",
"sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml",
"curl -sfL https://get.k3s.io | sudo ${local.k3ver} sh -s - server --cluster-init --token ${random_password.k3s-token.result}",
"mkdir /home/$${USER}/.kube/ && sudo cp /etc/rancher/k3s/k3s.yaml /home/$${USER}/.kube/config && sudo chown $${USER} /home/$${USER}/.kube/config",
"if ${var.kube_vip_enable} == true;then sudo mkdir -p /var/lib/rancher/k3s/server/manifests/ && sudo cp /tmp/kube-vip.yaml /var/lib/rancher/k3s/server/manifests/kube-vip.yaml;else rm /tmp/kube-vip.yaml ;fi",
"if ${var.cilium} == true;then : ;else exit 0;fi",
"curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | sudo bash && helm repo add cilium https://helm.cilium.io/",
"helm install cilium cilium/cilium --namespace kube-system ${local.cil_vers} --set k8sServiceHost=${local.vip},ipam.operator.clusterPoolIPv4PodCIDRList=[\"${var.k3s_cluster_cidr}\"] ${local.helm_flags}",
"sleep 10 && sudo kubectl get pods --all-namespaces -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork --no-headers=true | grep '<none>' | awk '{print \"-n \"$1\" \"$2}' | xargs -L 1 -r sudo kubectl delete pod",
"sudo rm -rf /home/$${USER}/.kube"
]
connection {
type = "ssh"
host = self.ssh_host
user = self.ssh_user
private_key = var.private_ssh_key
}
}
provisioner "remote-exec" {
# Checks if it is running in a non-first master,if true joins itself to the first master, else exits without error
inline = [
"if ip a | grep inet | awk '{print $2}' | grep -w ${cidrhost(var.masters.subnet, local.start_ip_master)};then exit 0;fi",
"while true;do curl -sSL https://${local.vip}:6443;if test $? -ne 0; then break;else sleep 1;fi;done",
"sudo mkdir -p /etc/rancher/k3s",
"sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml",
"if ${var.kube_vip_enable} == true;then sudo mkdir -p /var/lib/rancher/k3s/server/manifests && sudo cp /tmp/kube-vip.yaml /var/lib/rancher/k3s/server/manifests/kube-vip.yaml;else rm /tmp/kube-vip.yaml ;fi",
"curl -sfL https://get.k3s.io | sudo ${local.k3ver} sh -s - server --server https://${local.vip}:6443 --token ${random_password.k3s-token.result}",
"sleep 5"
]
connection {
type = "ssh"
host = self.ssh_host
user = self.ssh_user
private_key = var.private_ssh_key
}
}
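  # Drift on these arguments is ignored after creation, presumably to avoid
  # perpetual plan diffs from values the provider/cloud-init rewrites.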
  lifecycle {
    ignore_changes = [
      disk,
      network,
      serial,
      vga,
      tags,
      qemu_os,
      ciuser
    ]
  }

  depends_on = [
    resource.random_password.k3s-token
  ]
}