all:
  vars:
    rke_version: v1.4.2
    netclient_version: 0.17.1-0
    cluster_name: kagnostic-cluster
  hosts:
    netmaker:
      # computing instance for Netmaker - GCP VM - europe-west3-a - e2-micro - 10 GB boot disk (standard) - Ubuntu
      # it needs the firewall config described in README.md
      ansible_host: 34.141.32.232 # this is an EC2 instance atm - make sure this is a static IP, so the DNS entry stays valid
      ansible_user: cortana # the SSH user for this computing instance... make sure your public SSH key is stored in authorized_keys on the instance
      netmaker_version: v0.17.1 # for stability we do NOT rely on any "latest" versions.
      base_domain: k8s.voo.aero # will be used in the docker-compose template
      letsencrypt_email: [email protected]
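      # For illustration - a sketch, not the actual template (the compose template
      # isn't shown here, and these env-var names are assumptions based on
      # Netmaker v0.17.x defaults) - base_domain would be interpolated roughly like:
      #   environment:
      #     SERVER_NAME: "broker.{{ base_domain }}"
      #     SERVER_HTTP_HOST: "api.{{ base_domain }}"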
      network:
        id: kagnostic # TODO: shouldn't be too long, otherwise the API call fails badly ;-)
        address_range: 10.11.12.0/24 # I guess this can be any private IP range you like (https://whatismyipaddress.com/private-ip#private-ip); I assume only the last segment is usable without setup complications
        access_key:
          name: exampleAccessKey
          uses: 200
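        # Once Netmaker has created this key, each machine joins the mesh with the
        # generated token - a sketch, assuming the netclient v0.17.x CLI:
        #   netclient join -t <access-token-from-netmaker>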
      # SECRETS
      # For the sake of simplicity we don't care about security in this one.
      # BUT: I highly recommend NOT storing them in plaintext in the inventory. Either inject them at runtime or store them encrypted in a separate file. We can go into that on demand ;-)
      # In our production environment we have an encrypted file called `secret.sops.yaml` which contains all keys encrypted and decrypts them at runtime using SOPS
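      # A minimal sketch of that approach (assuming SOPS and the community.sops
      # Ansible collection are installed; the file name matches the one above):
      #   sops -e secrets.yaml > secret.sops.yaml   # encrypt once, commit only this file
      #   # ...then decrypt at runtime inside a playbook:
      #   - name: load decrypted secrets as variables
      #     community.sops.load_vars:
      #       file: secret.sops.yaml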
      credentials: # please don't be "creative" with credentials! Just let a randomizer generate your secrets
        master_key: randomMasterKey
        ui_admin_user: admin
        ui_admin_pass: adminPW
        mq_admin_password: mqAdminPW
    rke:
      # computing instance to run Rancher Kubernetes Engine...
      # GCP VM - europe-west3-c - e2-micro - 10 GB boot disk (standard)
      # no special firewall rules on this one... just keep 22 open for your Ansible business.
      # if you want to run a Rancher thingy you'll have to open 443/80 as well to access the web UI (I personally don't run it atm)
      # TODO: I guess it's also OK to run this locally.
      # So localhost should work too, as long as we integrate it into the networking infrastructure.
      # That would have to be done as an external client, since localhost is not reliably online.
      ansible_host: 34.159.50.95
      ansible_user: cortana
      netmaker_ip: 10.11.12.253
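      # The RKE CLI then provisions the cluster from this machine using a
      # cluster.yml generated from the node definitions below - a sketch,
      # assuming the standard RKE workflow:
      #   rke up --config cluster.yml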
    node1:
      # computing instance for k8s node
      # GCP VM - europe-west3-b - e2-medium (2vcpu, 4gb mem) - Standard GCP Linux OS
      # see README.md for firewall settings
      ansible_host: 34.107.25.224
      ansible_user: cortana
      netmaker_ip: 10.11.12.1
      k8s:
        roles:
          - controlplane
          - etcd
          - worker
        labels: # Kubernetes labels for this node. They can be used for nodeSelectors later on (see the sketch after this list). These are just some examples...
          - key: topology.kubernetes.io/region
            value: frankfurt
          - key: topology.kubernetes.io/zone
            value: b
          - key: node.cloudprovider.kubernetes.io/name
            value: gcp
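        # Example of how such labels are consumed later - a sketch (any pod spec
        # could pin itself to the GCP node via a nodeSelector):
        #   spec:
        #     nodeSelector:
        #       node.cloudprovider.kubernetes.io/name: gcp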
    node2:
      # computing instance for k8s node
      # AWS EC2 instance - eu-central-1b - t2.medium (2vcpu, 4gb mem) - Ubuntu 22.04
      # see README.md for firewall settings
      # ATTENTION for EC2: make sure the instance hostname matches the hostname in this Ansible file!
      ansible_host: 18.159.11.66
      ansible_user: ubuntu
      netmaker_ip: 10.11.12.2
      k8s:
        roles:
          - controlplane
          - etcd
          - worker
        labels:
          - key: topology.kubernetes.io/region
            value: frankfurt
          - key: topology.kubernetes.io/zone
            value: a1
          - key: node.cloudprovider.kubernetes.io/name
            value: aws
    node3:
      # computing instance for k8s node
      # Azure VM - West Europe - B2s (2vcpu, 4gb mem) - Ubuntu 22.04 - x64 arch
      # Azure doesn't support ed25519 public SSH keys... only RSA... you'll need to create an RSA key, at least for the Ansible install
      # see README.md for firewall settings
      ansible_host: 13.93.44.96
      ansible_user: cortana
      netmaker_ip: 10.11.12.3
      k8s:
        roles:
          - controlplane
          - etcd
          - worker
        labels:
          - key: topology.kubernetes.io/region
            value: westeurope
          - key: topology.kubernetes.io/zone
            value: netherlands
  # ansible group section...
  # it packs the hosts defined above into logical groups so they can be used in playbooks
  children: # this creates logical groups, which can/must be used in Ansible playbooks
    onprem: # there's no bare-metal on-prem node here, so no hosts in this group
    cloud:
      hosts:
        node1: # gcp
        node2: # aws
        node3: # azure
    nodes:
      children: # required if you want to make a group containing subgroups
        onprem:
        cloud:
    rancher:
      hosts:
        rke:
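# Example of how these groups are consumed in a playbook - a sketch (the
# play and role names are illustrative, not part of this repo):
#   - hosts: nodes    # targets node1-3 via the onprem/cloud subgroups
#     roles:
#       - netclient
#   - hosts: rancher  # targets the rke host only
#     roles:
#       - rke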