forked from redhat-partner-solutions/crucible
-
Notifications
You must be signed in to change notification settings - Fork 0
/
inventory.yml.sample
321 lines (266 loc) · 13.6 KB
/
inventory.yml.sample
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
---
all:
  vars:
    ##################################
    # Assisted Install Configuration #
    ##################################
    # These options configure Assisted Installer and the resulting cluster
    # https://generator.swagger.io/?url=https://raw.githubusercontent.com/openshift/assisted-service/58a6abd5c99d4e41d939be89cd0962433849a861/swagger.yaml
    # See section: cluster-create-params
    # Cluster name and dns domain combine to give the cluster namespace that will contain OpenShift endpoints
    # e.g. api.clustername.example.lab, worker1.clustername.example.lab
    cluster_name: clustername
    base_dns_domain: example.lab
    # OpenShift version (4.6.16, 4.7.33, 4.8.14 or 4.9.11)
    openshift_full_version: 4.6.16
    # Virtual IP addresses used to access the resulting OpenShift cluster
    api_vip: 10.60.0.96 # the IP address to be used for api.clustername.example.lab and api-int.clustername.example.lab
    ingress_vip: 10.60.0.97 # the IP address to be used for *.apps.clustername.example.lab
    ## Allocate virtual IPs via DHCP server. Equivalent to the vip_dhcp_allocation configuration option of Assisted Installer
    vip_dhcp_allocation: false
    # The subnet on which all nodes are (or will be) accessible.
    machine_network_cidr: 10.60.0.0/24
    # The IP address pool to use for service IP addresses
    service_network_cidr: 172.30.0.0/16
    # Cluster network settings. You are unlikely to need to change these
    cluster_network_cidr: 10.128.0.0/14 # The subnet, internal to the cluster, on which pods will be assigned IPs
    cluster_network_host_prefix: 23 # The subnet prefix length to assign to each individual node.
    # # Cluster network provider. Cannot be changed after cluster is created.
    # # The default is OpenShift SDN unless otherwise specified.
    # network_type: OVNKubernetes
    # network_type: OpenShiftSDN

    ######################################
    # Prerequisite Service Configuration #
    ######################################
    # Proxy settings. These settings apply to: Assisted Installer, Day1 clusters and Day2 clusters.
    # This assumes the host where the AI runs and the OpenShift cluster share the same proxy settings.
    # http_proxy: ""
    # https_proxy: ""
    # no_proxy: ""

    # Flags to enable/disable prerequisite service setup
    # You will need to ensure alternatives are available for anything that will not be automatically set up
    setup_ntp_service: false
    setup_dns_service: false
    setup_pxe_service: false
    setup_registry_service: false # Only required for a Restricted Network installation
    setup_http_store_service: true
    setup_assisted_installer: true # default is true; you may wish to turn it off if multiple users are using the same instance.

    # NTP Service
    # ntp_server is the address at which the NTP service is (or will be) available
    ntp_server: 10.40.0.100
    # ntp_server_allow is the range of IPs the NTP service will respond to
    # ntp_server_allow: 10.40.0.0/24 # not required if setup_ntp_service is false

    # Mirror Registry Service parameters for a Restricted Network installation
    # use_local_mirror_registry controls if the install process uses a local container registry (mirror_registry) or not.
    # Set this to true to use the mirror registry service set up when `setup_registry_service` is true.
    use_local_mirror_registry: false

    # HTTP Store Configuration
    # ISO name must include the `discovery` directory if you have a SuperMicro machine
    discovery_iso_name: "discovery/{{ cluster_name }}/discovery-image.iso"
    # discovery_iso_server must be discoverable from all BMCs in order for them to mount the ISO hosted there.
    # It is usually necessary to specify different values for KVM nodes and/or physical BMCs if they are on different subnets.
    discovery_iso_server: "http://{{ hostvars['http_store']['ansible_host'] }}"

    ############################
    # Local File Configuration #
    ############################
    repo_root_path: /home/redhat/crucible/ # path to repository root

    # Directory in which created/updated artifacts are placed
    fetched_dest: "{{ repo_root_path }}/fetched"

    # Configure possible paths for the pull secret
    # first one found will be used
    # note: paths should be absolute
    pull_secret_lookup_paths:
      - "{{ fetched_dest }}/pull-secret.txt"
      - "{{ repo_root_path }}/pull-secret.txt"

    # Configure possible paths for the ssh public key used for debugging
    # first one found will be used
    # note: paths should be absolute
    ssh_public_key_lookup_paths:
      - "{{ fetched_dest }}/ssh_keys/{{ cluster_name }}.pub"
      - "{{ repo_root_path }}/ssh_public_key.pub"
      - ~/.ssh/id_rsa.pub

    # Set the base directory to store ssh keys
    ssh_key_dest_base_dir: /home/redhat

    # The retrieved cluster kubeconfig will be placed on the bastion host at the following location
    kubeconfig_dest_dir: /home/redhat/
    kubeconfig_dest_filename: "{{ cluster_name }}-kubeconfig"
    kubeadmin_dest_filename: "{{ cluster_name }}-kubeadmin.vault.yml"
    # You can comment out the line below if you want the kubeadmin credentials to be stored in plain text
    kubeadmin_vault_password_file_path: "{{ repo_root_path }}/kubeadmin_vault_password_file"

    ############################
    # LOGIC: DO NOT TOUCH      #
    # vvvvvvvvvvvvvvvvvvvvvvvv #
    ############################
    # pull secret logic, no need to change. Configure above
    local_pull_secret_path: "{{ lookup('first_found', pull_secret_lookup_paths) }}"
    pull_secret: "{{ lookup('file', local_pull_secret_path) }}"
    # ssh key logic, no need to change. Configure above
    local_ssh_public_key_path: "{{ lookup('first_found', ssh_public_key_lookup_paths) }}"
    ssh_public_key: "{{ lookup('file', local_ssh_public_key_path) }}"
    # provided mirror certificate logic, no need to change.
    local_mirror_certificate_path: "{{ (setup_registry_service == true) | ternary(
        fetched_dest + '/' + (hostvars['registry_host']['cert_file_prefix'] | default('registry')) + '.crt',
        repo_root_path + '/mirror_certificate.txt')
      }}"
    mirror_certificate: "{{ lookup('file', local_mirror_certificate_path) }}"
    openshift_version: "{{ openshift_full_version.split('.')[:2] | join('.') }}"
    is_valid_single_node_openshift_config: "{{ (groups['nodes'] | length == 1) and (groups['masters'] | length == 1) }}"
    ############################
    # ^^^^^^^^^^^^^^^^^^^^^^^^ #
    # LOGIC: DO NOT TOUCH      #
    ############################

  children:
    bastions: # n.b. Currently only a single bastion is supported
      hosts:
        bastion:
          ansible_host: 10.40.0.100 # Must be reachable from the Ansible control node
          ansible_connection: local # if you are not running crucible from the bastion then remove this line

    # Configuration and access information for the pre-requisite services
    # TODO: document differences needed for already-deployed and auto-deployed
    services:
      hosts:
        assisted_installer:
          ansible_host: service_host.example.lab
          host: service_host.example.lab
          port: 8090 # Do not change

        registry_host:
          ansible_host: registry.example.lab
          registry_port: 5000
          registry_fqdn: registry.example.lab # use in case of different FQDN for the cert
          cert_common_name: "{{ registry_fqdn }}"
          cert_country: US
          cert_locality: Raleigh
          cert_organization: Red Hat, Inc.
          cert_organizational_unit: Lab
          cert_state: NC
          # Configure the following secret values in the inventory.vault.yml file
          REGISTRY_HTTP_SECRET: "{{ VAULT_REGISTRY_HOST_REGISTRY_HTTP_SECRET | mandatory }}"
          disconnected_registry_user: "{{ VAULT_REGISTRY_HOST_DISCONNECTED_REGISTRY_USER | mandatory }}"
          disconnected_registry_password: "{{ VAULT_REGISTRY_HOST_DISCONNECTED_REGISTRY_PASSWORD | mandatory }}"

        dns_host:
          ansible_host: 10.40.0.100
          # upstream_dns: 8.8.8.8 # an optional upstream dns server
          # The following are required for DHCP setup
          # use_dhcp: true
          # use_pxe: false
          # dhcp_range_first: 10.60.0.101
          # dhcp_range_last: 10.60.0.105
          # prefix: 24
          # gateway: 10.60.0.1

        http_store:
          ansible_host: 10.40.0.100

        tftp_host:
          ansible_host: "{{ hostvars['dns_host']['ansible_host'] }}"
          tftp_directory: /var/lib/tftpboot/

        ntp_host:
          ansible_host: 10.40.0.100

    vm_hosts:
      hosts:
        vm_host1: # Required for using "KVM" nodes, ignored if not.
          ansible_user: root
          ansible_host: vm_host.clustername.example.lab
          host_ip_keyword: ansible_host # the varname in the KVM node hostvars which contains the *IP* of the VM
          images_dir: /home/redhat/libvirt/images # directory where qcow images will be placed.
          vm_bridge_ip: 10.60.0.190 # IP for the bridge between VMs and machine network
          vm_bridge_interface: ens7f1 # Interface to be connected to the bridge. DO NOT use your primary interface.
          dns: 10.40.0.100 # DNS used by the bridge
          # ssl cert configuration
          # sushy_fqdn: ... # use in case of different FQDN for the cert
          cert_vars_host_var_key: registry_host # Look up cert values from another host by name (excluding cert_common_name)
          # or
          # cert_country: US
          # cert_locality: Raleigh
          # cert_organization: Red Hat, Inc.
          # cert_organizational_unit: Lab
          # cert_state: NC

    # Describe the desired cluster members
    nodes:
      # A minimum of three master nodes are required. More are supported.
      # Worker nodes are not required, but if present there must be two or more.
      #
      # Node Required Vars:
      # - role
      #   - Must be either "master" or "worker", and must match the group
      #
      # - mac
      #   - The MAC address of the node, used as a hardware identifier by Assisted Installer.
      #   - The value set here will be used when creating VMs and must be unique within the network.
      #
      # - vendor
      #   - One of "Dell", "HPE", "Lenovo", "SuperMicro", "KVM", "PXE" as the supported BMC APIs.
      #   - "KVM" identifies a node as a VM to be created. If a "KVM" node is present,
      #     then a "vm_host" must be defined in the node and a host with that name must exist
      #     inside the "vm_hosts" group.
      #   - "PXE" identifies a node as a baremetal that needs to boot from PXE.
      #
      # - bmc_address
      # - bmc_user
      # - bmc_password
      #   - details for the BMC that controls the node.
      #   - Must be set to the vm_host for "KVM" nodes.
      #
      # Static IP Vars:
      #   See docs/inventory.md: Network configuration section
      #
      # Optional Vars:
      # - vm_spec
      #   - Specifications for the node:
      #     - cpu_cores
      #     - ram_mib
      #     - disk_size_gb
      #
      # - installation_disk_path
      #   - The value set here will be used by Assisted Installer as the installation disk device
      #     for a given host.
      #   - The value must be a path to the disk device, e.g. /dev/sda
      #   - If not specified, Assisted Installer will pick the first enumerated disk device for a
      #     given host.
      vars:
        # Set the login information for any BMCs. Note that these will be SET on the vm_host virtual BMC.
        bmc_user: "{{ VAULT_NODES_BMC_USER | mandatory }}"
        bmc_password: "{{ VAULT_NODES_BMC_PASSWORD | mandatory }}"
      children:
        masters:
          vars:
            role: master
            vendor: KVM # this example is a virtual control plane
            bmc_address: "vm_host.clustername.example.lab:8082" # port can be changed using sushy_tools_port on the vm_host
            vm_host: vm_host1
            vm_spec:
              cpu_cores: 4
              ram_mib: 16384
              disk_size_gb: 50
          hosts:
            super1:
              ansible_host: 10.60.0.101
              mac: "DE:AD:BE:EF:C0:2C"
              # # Uncomment to set custom BMC credentials for the node
              # # These variables must be set in the inventory.vault.yml file
              # bmc_user: "{{ VAULT_NODES_SUPER1_BMC_USER | mandatory }}"
              # bmc_password: "{{ VAULT_NODES_SUPER1_BMC_PASSWORD | mandatory }}"

            super2:
              ansible_host: 10.60.0.102
              mac: "DE:AD:BE:EF:C0:2D"

            super3:
              ansible_host: 10.60.0.103
              mac: "DE:AD:BE:EF:C0:2E"

        workers:
          vars:
            role: worker
            vendor: Dell # This example uses baremetal worker nodes
          hosts:
            worker1:
              ansible_host: 10.60.0.104
              bmc_address: 172.28.11.25
              mac: "3C:FD:FE:78:AB:03"
              # # Uncomment to set custom BMC credentials for the node
              # # These variables must be set in the inventory.vault.yml file
              # bmc_user: "{{ VAULT_NODES_WORKER1_BMC_USER | mandatory }}"
              # bmc_password: "{{ VAULT_NODES_WORKER1_BMC_PASSWORD | mandatory }}"
              # # Uncomment to set an alternate installation disk device for the node
              # installation_disk_path: /dev/sdb

            worker2:
              ansible_host: 10.60.0.105
              bmc_address: 172.28.11.26
              mac: "3C:FD:FE:78:AB:04"
              # # Uncomment to set an alternate installation disk device for the node
              # installation_disk_path: /dev/sdb