-
Notifications
You must be signed in to change notification settings - Fork 52
/
createdisk.sh
executable file
·215 lines (175 loc) · 9 KB
/
createdisk.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
#!/bin/bash
# Post-process a freshly installed CRC/OKD/MicroShift VM and package it into
# per-hypervisor bundle images (libvirt, Hyper-V, vfkit).
set -exuo pipefail
# Byte-wise, locale-independent behavior for all text-processing tools.
export LC_ALL=C
export LANG=C
# Helper functions (wait_for_ssh, create_qemu_image, shutdown_vm, ...) are
# defined in these sourced files; they must be present in the working directory.
source tools.sh
source createdisk-library.sh
# Deliberately stored as unquoted multi-word strings and expanded later as
# "command + options". Host-key checking is disabled because the VM is rebuilt
# on every run, so its host key changes each time.
SSH="ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i id_ecdsa_crc"
SCP="scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i id_ecdsa_crc"
# Directory produced by the install step; the first CLI argument overrides it.
INSTALL_DIR=${1:-crc-tmp-install-data}
# Read bundle metadata written by the earlier pipeline stage. The file path is
# quoted so a space-containing INSTALL_DIR cannot word-split the jq arguments
# (SC2086); ${JQ} itself stays unquoted in case it carries extra options.
OPENSHIFT_VERSION=$(${JQ} -r .clusterInfo.openshiftVersion "${INSTALL_DIR}/crc-bundle-info.json")
BASE_DOMAIN=$(${JQ} -r .clusterInfo.baseDomain "${INSTALL_DIR}/crc-bundle-info.json")
BUNDLE_TYPE=$(${JQ} -r .type "${INSTALL_DIR}/crc-bundle-info.json")
# Map the bundle type to the artifact-name prefix and to the base-OS token
# that appears in /proc/cmdline (used later to locate the ostree deployment).
case "${BUNDLE_TYPE}" in
  microshift)
    destDirPrefix="crc_${BUNDLE_TYPE}"
    BASE_OS=rhel
    ;;
  okd)
    destDirPrefix="crc_${BUNDLE_TYPE}"
    # Base OS is not changed for scos-okd because `/proc/cmdline` still contain fedora-coreos
    # https://github.com/okd-project/okd-scos/issues/18
    BASE_OS=fedora-coreos
    ;;
  snc)
    destDirPrefix="crc"
    BASE_OS=rhcos
    ;;
  *)
    echo "Unknown bundle type '$BUNDLE_TYPE'"
    exit 1
    ;;
esac
# SNC_PRODUCT_NAME: If user want to use other than default product name (crc)
# VM_PREFIX: short VM name (set by SNC_PRODUCT_NAME) + random string generated by openshift-installer
SNC_PRODUCT_NAME=${SNC_PRODUCT_NAME:-crc}
VM_NAME=${SNC_PRODUCT_NAME}
# Extract the VM's IPv4 address from 'virsh domifaddr' output (strip the /prefix).
# NOTE(review): relies on the address sitting on the second-to-last line of the
# table; fragile if the libvirt output format changes -- verify on upgrades.
VM_IP=$(sudo virsh domifaddr ${VM_NAME} | tail -2 | head -1 | awk '{print $4}' | cut -d/ -f1)
# Provided by createdisk-library.sh: blocks until the VM answers over SSH.
wait_for_ssh ${VM_NAME} ${VM_IP}
# For non-MicroShift bundles, make sure kubelet stays down while the disk is
# snapshotted and does not start on the bundle's first boot.
# BUNDLE_TYPE is quoted inside [ ] to avoid word-splitting/empty-operand
# failures (SC2086).
if [ "${BUNDLE_TYPE}" != "microshift" ]; then
    # Disable kubelet service
    ${SSH} core@${VM_IP} -- sudo systemctl disable kubelet
    # Stop the kubelet service so it will not reprovision the pods
    ${SSH} core@${VM_IP} -- sudo systemctl stop kubelet
fi
# Enable the system and user level podman.socket service for API V2
${SSH} core@${VM_IP} -- sudo systemctl enable podman.socket
${SSH} core@${VM_IP} -- systemctl --user enable podman.socket
# MicroShift-only preparation of the VM before imaging.
if [ ${BUNDLE_TYPE} == "microshift" ]; then
# Pull openshift release images because as part of microshift bundle creation we
# don't run microshift service which fetch these image but instead service is run
# as part of crc so user have a fresh cluster instead something already provisioned
# but images we cache it as part of bundle.
# NOTE(review): the heredoc delimiter is unquoted, so $(uname -i) expands on the
# build HOST, not inside the VM -- this assumes host and guest share the same
# architecture; confirm before any cross-arch build.
${SSH} core@${VM_IP} 'sudo bash -x -s' <<EOF
jq --raw-output '.images | to_entries | map(.value) | join("\n")' /usr/share/microshift/release/release-$(uname -i).json | xargs -n1 podman pull --authfile /etc/crio/openshift-pull-secret
EOF
# Disable firewalld otherwise generated bundle have it running and each podman container
# which try to expose a port need to added to firewalld rule manually
# also in case of microshift the ports like 2222, 443, 80 ..etc need to be manually added
# and OCP/OKD/podman bundles have it disabled by default.
${SSH} core@${VM_IP} -- sudo systemctl disable firewalld
# Fetch the default MicroShift config, rewrite its baseDomain for this product
# name locally with yq, then install it back as the active config in the VM.
${SSH} core@${VM_IP} -- cat /etc/microshift/config.yaml.default > config.yaml
${YQ} eval --inplace ".dns.baseDomain = \"${SNC_PRODUCT_NAME}.${BASE_DOMAIN}\"" config.yaml
${SCP} config.yaml core@${VM_IP}:/home/core
${SSH} core@${VM_IP} -- 'sudo mv /home/core/config.yaml /etc/microshift/config.yaml'
# Make sure `baseDomain` is set to crc.testing
${SSH} core@${VM_IP} -- "grep '^\s\+baseDomain: ${SNC_PRODUCT_NAME}.${BASE_DOMAIN}' /etc/microshift/config.yaml"
# Remove the lvm system.device file since it have diskID and deviceName which changes
# for different hypervisor and as per `man lvmdevices` if the file does not exist, or if lvm.conf
# includes use_devicesfile=0, then lvm will not use a devices file.
${SSH} core@${VM_IP} -- "sudo rm -fr /etc/lvm/devices/system.devices"
fi
# Provided by createdisk-library.sh: scrubs the pull secret from the VM disk.
remove_pull_secret_from_disk
# Windows-bundle preparation (library helper); skipped when the env var is "0".
if [ "${SNC_GENERATE_WINDOWS_BUNDLE}" != "0" ]; then
prepare_hyperV ${VM_IP}
fi
prepare_qemu_guest_agent ${VM_IP}
# Prefer a routes-controller image tagged with this OpenShift version when one
# has been published; otherwise fall back to the floating "latest" tag.
image_tag="latest"
if podman manifest inspect quay.io/crcont/routes-controller:${OPENSHIFT_VERSION} >/dev/null 2>&1; then
image_tag=${OPENSHIFT_VERSION}
fi
# Add gvisor-tap-vsock service
# Create the container once, generate a systemd unit from it, and enable that
# unit so the service starts on the bundle's first boot.
${SSH} core@${VM_IP} 'sudo bash -x -s' <<EOF
podman create --name=gvisor-tap-vsock --privileged --net=host -v /etc/resolv.conf:/etc/resolv.conf -it quay.io/crcont/gvisor-tap-vsock:latest
podman generate systemd --restart-policy=no gvisor-tap-vsock > /etc/systemd/system/gvisor-tap-vsock.service
systemctl daemon-reload
systemctl enable gvisor-tap-vsock.service
EOF
# Add dummy crio-wipe service to instance
# Feed the local unit file to the remote tee via stdin instead of a useless
# 'cat |' pipeline (SC2002). Plain 'tee' (not 'tee -a') makes this idempotent:
# re-running the script overwrites the unit instead of appending a second copy.
${SSH} core@${VM_IP} "sudo tee /etc/systemd/system/crio-wipe.service" < crio-wipe.service
# Preload routes controller
${SSH} core@${VM_IP} -- "sudo podman pull quay.io/crcont/routes-controller:${image_tag}"
# Render the manifest template with the chosen image tag, then stage it under
# /opt/crc inside the VM. Local paths are quoted so a space-containing
# INSTALL_DIR cannot word-split the command (SC2086).
TAG=${image_tag} envsubst < routes-controller.yaml.in > "${INSTALL_DIR}/routes-controller.yaml"
${SCP} "${INSTALL_DIR}/routes-controller.yaml" core@${VM_IP}:/home/core/
${SSH} core@${VM_IP} -- 'sudo mkdir -p /opt/crc && sudo mv /home/core/routes-controller.yaml /opt/crc/'
if [ ${BUNDLE_TYPE} != "microshift" ]; then
# Add internalIP as node IP for kubelet systemd unit file
# More details at https://bugzilla.redhat.com/show_bug.cgi?id=1872632
# The heredoc delimiter is unquoted on purpose: ${VM_IP} expands on the build
# host, so the drop-in written inside the VM contains the literal IP address.
${SSH} core@${VM_IP} 'sudo bash -x -s' <<EOF
echo '[Service]' > /etc/systemd/system/kubelet.service.d/80-nodeip.conf
echo 'Environment=KUBELET_NODE_IP="${VM_IP}"' >> /etc/systemd/system/kubelet.service.d/80-nodeip.conf
EOF
fi
if [ "${ARCH}" == "aarch64" ] && [ ${BUNDLE_TYPE} != "okd" ]; then
# Install qemu-user-static-x86 package from fedora-updates repo to run x86 image on M1
# Not supported by RHEL https://access.redhat.com/solutions/5654221 and not included
# in any subscription repo.
# The quoted 'EOF' delimiter keeps $basearch literal, so dnf inside the VM --
# not this script -- resolves it.
cat > /tmp/fedora-updates.repo <<'EOF'
[fedora-updates]
name=Fedora 41 - $basearch - Updates
metalink=https://mirrors.fedoraproject.org/metalink?repo=updates-released-f41&arch=$basearch
enabled=1
type=rpm
repo_gpgcheck=0
gpgcheck=0
EOF
# Copy the repo file into the VM and layer the package with rpm-ostree.
${SCP} /tmp/fedora-updates.repo core@${VM_IP}:/tmp
${SSH} core@${VM_IP} -- "sudo mv /tmp/fedora-updates.repo /etc/yum.repos.d"
${SSH} core@${VM_IP} -- "sudo rpm-ostree install qemu-user-static-x86"
fi
# Provided by createdisk-library.sh: scrubs machine-specific state from the VM.
cleanup_vm_image ${VM_NAME} ${VM_IP}
# Delete all the pods and lease from the etcd db so that when this bundle is use for the cluster provision, everything comes up in clean state.
if [ ${BUNDLE_TYPE} != "microshift" ]; then
# Discover the etcd image from the static-pod manifest, then run it standalone
# against the on-disk store (/var/lib/etcd) so keys can be pruned while the
# cluster is down.
etcd_image=$(${SSH} core@${VM_IP} -- "sudo jq -r '.spec.containers[] | select(.name == \"etcd\") | .image' /etc/kubernetes/manifests/etcd-pod.yaml")
${SSH} core@${VM_IP} -- "sudo podman run --rm --network=host --privileged --replace --name crc-etcd --detach --entrypoint etcd -v /var/lib/etcd:/store \"${etcd_image}\" --data-dir /store"
# NOTE(review): fixed sleep gives the detached etcd time to start; a readiness
# poll against the client port would be more reliable.
sleep 5
${SSH} core@${VM_IP} -- "sudo podman exec crc-etcd etcdctl del --prefix /kubernetes.io/pods"
${SSH} core@${VM_IP} -- "sudo podman exec crc-etcd etcdctl del --prefix /kubernetes.io/leases"
# --rm on the run above removes the container once stopped.
${SSH} core@${VM_IP} -- "sudo podman stop crc-etcd"
fi
# Record the podman version shipped in the VM so matching client binaries can
# be downloaded on the host later.
podman_version=$(${SSH} core@${VM_IP} -- 'rpm -q --qf %{version} podman')
# Get the rhcos ostree Hash ID
# (grep reads /proc/cmdline directly -- no useless 'cat |' pipeline, SC2002)
ostree_hash=$(${SSH} core@${VM_IP} -- "grep -oP \"(?<=${BASE_OS}-).*(?=/vmlinuz)\" /proc/cmdline")
# Get the rhcos kernel release
kernel_release=$(${SSH} core@${VM_IP} -- 'uname -r')
# Get the kernel command line arguments
kernel_cmd_line=$(${SSH} core@${VM_IP} -- 'cat /proc/cmdline')
# Get the vmlinux/initramfs to /tmp/kernel and change permission for initramfs
${SSH} core@${VM_IP} -- "mkdir /tmp/kernel && sudo cp -r /boot/ostree/${BASE_OS}-${ostree_hash}/*${kernel_release}* /tmp/kernel && sudo chmod 644 /tmp/kernel/initramfs*"
# SCP the vmlinuz/initramfs from VM to Host in provided folder.
# The remote glob stays unquoted (expanded by the remote shell); the local
# destination is quoted against word splitting (SC2086).
${SCP} -r core@${VM_IP}:/tmp/kernel/* "$INSTALL_DIR"
${SSH} core@${VM_IP} -- "sudo rm -fr /tmp/kernel"
# Shutdown the VM
shutdown_vm ${VM_NAME}
# Download podman clients
# (download_podman and yq_ARCH come from the sourced helper scripts)
download_podman $podman_version ${yq_ARCH}
# libvirt image generation
# get_dest_dir_suffix sets DEST_DIR_SUFFIX for the given OpenShift version.
get_dest_dir_suffix "${OPENSHIFT_VERSION}"
destDirSuffix="${DEST_DIR_SUFFIX}"
libvirtDestDir="${destDirPrefix}_libvirt_${destDirSuffix}"
# Start from a clean slate: drop any directory/bundle left by a previous run.
rm -fr ${libvirtDestDir} ${libvirtDestDir}.crcbundle
mkdir "$libvirtDestDir"
create_qemu_image "$libvirtDestDir"
copy_additional_files "$INSTALL_DIR" "$libvirtDestDir" "${VM_NAME}"
if [ "${SNC_GENERATE_LINUX_BUNDLE}" != "0" ]; then
create_tarball "$libvirtDestDir"
fi
# HyperV image generation
#
# This must be done after the generation of libvirt image as it reuses some of
# the content of $libvirtDestDir
if [ "${SNC_GENERATE_WINDOWS_BUNDLE}" != "0" ]; then
hypervDestDir="${destDirPrefix}_hyperv_${destDirSuffix}"
# Remove any leftovers from a previous run before regenerating.
rm -fr ${hypervDestDir} ${hypervDestDir}.crcbundle
generate_hyperv_bundle "$libvirtDestDir" "$hypervDestDir"
fi
# vfkit image generation
# This must be done after the generation of libvirt image as it reuses some of
# the content of $libvirtDestDir
if [ "${SNC_GENERATE_MACOS_BUNDLE}" != "0" ]; then
vfkitDestDir="${destDirPrefix}_vfkit_${destDirSuffix}"
rm -fr ${vfkitDestDir} ${vfkitDestDir}.crcbundle
generate_vfkit_bundle "$libvirtDestDir" "$vfkitDestDir" "$INSTALL_DIR" "$kernel_release" "$kernel_cmd_line"
# Cleanup up vmlinuz/initramfs files
# BUG FIX: the wildcards were previously inside the double quotes
# ("$INSTALL_DIR/vmlinuz*"), so the shell never expanded them and the staged
# kernel artifacts were left behind. Quote only the variable; keep the glob
# outside the quotes. 'rm -f' stays silent if nothing matches.
rm -f "$INSTALL_DIR"/vmlinuz* "$INSTALL_DIR"/initramfs*
fi