forked from kubealex/libvirt-k8s-provisioner
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path05_provisioning_vms.yml
155 lines (138 loc) · 5.89 KB
/
05_provisioning_vms.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
---
# Provisions the k8s cluster VMs (masters, workers, optional rook workers)
# on the libvirt host using cloud images and terraform.
- name: This play provisions k8s VMs based on initial config
  hosts: vm_host
  vars_files:
    - vars/k8s_cluster.yml
  tasks:
    - name: Download CentOS image
      get_url:
        url: "{{ centos.cloud_image }}"
        dest: /tmp/{{ image_name }}.qcow2
        # Quoted so the octal literal is not parsed as the integer 511.
        mode: "0777"
      when: k8s.cluster_os == 'CentOS'
- name: Download Ubuntu image
get_url:
url: "{{ ubuntu.cloud_image }}"
dest: /tmp/{{ image_name }}.qcow2
mode: 0777
when: k8s.cluster_os == 'Ubuntu'
- name: Create a copy for master nodes
copy:
src: /tmp/{{ image_name }}.qcow2
dest: /tmp/{{ image_name }}-master.qcow2
remote_src: true
- name: Create a copy for worker nodes
copy:
src: /tmp/{{ image_name }}.qcow2
dest: /tmp/{{ image_name }}-worker.qcow2
remote_src: true
- name: Grow cloud partition to avoid disk pressure
shell: qemu-img resize /tmp/{{ image_name }}-master.qcow2 {{ k8s.control_plane.disk }}G
- name: Grow cloud partition to avoid disk pressure
shell: qemu-img resize /tmp/{{ image_name }}-worker.qcow2 {{ k8s.worker_nodes.disk }}G
    - name: Create ssh keypair
      # Generates ./id_rsa (+ .pub) relative to the playbook's working dir;
      # the private key path is reused below as ansible_ssh_private_key_file
      # for the "kube" user. Presumably the public key is injected into the
      # VMs by the terraform/cloud-init config — confirm in files/terraform.
      openssh_keypair:
        path: "id_rsa"
- terraform:
project_path: "files/terraform/masters"
force_init: true
variables:
hostname: "{{ k8s.cluster_name }}-master"
domain: "{{ k8s.network.domain }}"
os: "{{ 'ubuntu' if k8s.cluster_os == 'Ubuntu' else 'centos' }}"
libvirt_network: "{{ k8s.cluster_name }}"
libvirt_pool: "{{ k8s.cluster_name }}"
cpu: "{{ k8s.control_plane.vcpu }}"
memory: "{{ k8s.control_plane.mem }}"
vm_count: "{{ k8s.control_plane.vms }}"
os_image_name: "{{ image_name }}-master.qcow2"
state: present
become: true
register: output_masters
- terraform:
project_path: "files/terraform/workers"
force_init: true
variables:
hostname: "{{ k8s.cluster_name }}-worker"
domain: "{{ k8s.network.domain }}"
os: "{{ 'ubuntu' if k8s.cluster_os == 'Ubuntu' else 'centos' }}"
libvirt_network: "{{ k8s.cluster_name }}"
libvirt_pool: "{{ k8s.cluster_name }}"
cpu: "{{ k8s.worker_nodes.vcpu }}"
memory: "{{ k8s.worker_nodes.mem }}"
vm_count: "{{ (k8s.worker_nodes.vms - rook.rook_cluster_size) if rook_ceph.install_rook else k8s.worker_nodes.vms }}"
vm_counter: "{{ rook.rook_cluster_size if rook_ceph.install_rook else 0 }}"
os_image_name: "{{ image_name }}-worker.qcow2"
state: present
become: true
register: output_workers
- terraform:
project_path: "files/terraform/workers-rook"
force_init: true
variables:
hostname: "{{ k8s.cluster_name }}-worker"
domain: "{{ k8s.network.domain }}"
os: "{{ 'ubuntu' if k8s.cluster_os == 'Ubuntu' else 'centos' }}"
libvirt_network: "{{ k8s.cluster_name }}"
libvirt_pool: "{{ k8s.cluster_name }}"
cpu: "{{ k8s.worker_nodes.vcpu }}"
memory: "{{ k8s.worker_nodes.mem }}"
rook_volume_size: "{{ rook_ceph.volume_size }}"
#vm_count: "{{ k8s.worker_nodes.vms }}"
vm_count: "{{ rook.rook_cluster_size }}"
os_image_name: "{{ image_name }}-worker.qcow2"
state: present
become: true
register: output_workers_rook
when: rook_ceph.install_rook
    - name: Add masters to given group
      # Register each master VM in the in-memory inventory so later plays
      # can target the "masters" group; per-host vars carry the node's
      # MAC (from the terraform output) and SSH connection settings.
      add_host:
        hostname: '{{ k8s.cluster_name }}-master-{{ item }}.{{ k8s.network.domain }}'
        k8s_installed: false
        node_hostname: '{{ k8s.cluster_name }}-master-{{ item }}.{{ k8s.network.domain }}'
        # MAC from terraform output, lowercased — presumably used later for
        # DHCP reservations / node matching; confirm against later plays.
        node_mac: "{{ output_masters.outputs.macs.value[item]|lower }}"
        node_fqdn: '{{ k8s.cluster_name }}-master-{{ item }}.{{ k8s.network.domain }}'
        ansible_ssh_private_key_file: "id_rsa"
        ansible_user: kube
        ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
        groups:
          - "masters"
        # Consumed by the group_by task in the next play.
        group_children: "k8s_nodes"
      loop: "{{ range(0, k8s.control_plane.vms) | list }}"
- name: Add workers to given group
add_host:
hostname: '{{ k8s.cluster_name }}-worker-{{ item }}.{{ k8s.network.domain }}'
node_hostname: '{{ k8s.cluster_name }}-worker-{{ item }}.{{ k8s.network.domain }}'
node_mac: "{{ output_workers_rook.outputs.macs.value[item]|lower if (rook_ceph.install_rook and item <= 2) else output_workers_rook.outputs.macs.value[item - rook.rook_cluster_size]|lower if (rook_ceph.install_rook and item > 2 ) else output_workers.outputs.macs.value[item]|lower }}"
node_fqdn: "{{ k8s.cluster_name }}-worker-{{ item }}.{{ k8s.network.domain }}"
ansible_ssh_private_key_file: "id_rsa"
ansible_user: kube
ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
groups:
- "workers"
group_children: "k8s_nodes"
loop: "{{ range(0, k8s.worker_nodes.vms) | list }}"
    - name: Ensure to clean known_hosts
      # Drop any stale SSH host keys for every inventory host except the
      # hypervisor itself, so freshly provisioned VMs that reuse an old
      # hostname don't trip host-key verification.
      known_hosts:
        host: "{{ item }}"
        path: ~/.ssh/known_hosts
        state: absent
      loop: "{{ query('inventory_hostnames', 'all:!vm_host') }}"
# Wait for the new VMs to come up, sort them into their children groups,
# and capture network identity facts for later plays.
- name: Check connection and set facts
  hosts: masters,workers
  # Facts are gathered explicitly below once the hosts are reachable.
  gather_facts: false
  tasks:
    - name: Wait 600 seconds for target connection to become reachable/usable
      wait_for_connection:
        timeout: 600
        delay: 0
    - name: Verify Ansible connectivity
      ping:
    - name: Add hosts to children groups
      group_by:
        # group_children is set per-host by the add_host tasks ("k8s_nodes").
        key: "{{ group_children }}"
    - name: Gather facts from the provisioned VMs
      setup:
      register: machine_facts
    - name: Record network identity facts for later plays
      set_fact:
        host_ip: "{{ machine_facts.ansible_facts.ansible_default_ipv4.address }}"
        host_interface: "{{ machine_facts.ansible_facts.ansible_default_ipv4.interface }}"
        host_mac: "{{ machine_facts.ansible_facts.ansible_default_ipv4.macaddress }}"
        host_fqdn: "{{ machine_facts.ansible_facts.ansible_fqdn }}"