diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/.gitignore b/Support/Multitenancy/Multiple-Ironic-conductors/.gitignore
index f81d7471..9655cca7 100644
--- a/Support/Multitenancy/Multiple-Ironic-conductors/.gitignore
+++ b/Support/Multitenancy/Multiple-Ironic-conductors/.gitignore
@@ -9,3 +9,5 @@ macaddrs
 uuids
 sushy-tools-conf/*
 bmc-*.yaml
+ironic.env
+ironic_logs/*
diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/Init-environment-v3.sh b/Support/Multitenancy/Multiple-Ironic-conductors/Init-environment-v3.sh
new file mode 100755
index 00000000..5e46cacb
--- /dev/null
+++ b/Support/Multitenancy/Multiple-Ironic-conductors/Init-environment-v3.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+set -e
+trap 'trap - SIGTERM && kill -- -'$$'' SIGINT SIGTERM EXIT
+__dir__=$(realpath "$(dirname "$0")")
+# shellcheck disable=SC1091
+. ./config.sh
+# This is temporarily required since https://review.opendev.org/c/openstack/sushy-tools/+/875366 has not been merged.
+sudo ./vm-setup.sh
+./configure-minikube.sh
+sudo ./handle-images.sh
+./build-sushy-tools-image.sh
+./generate_unique_nodes.sh
+./start_containers.sh
+./start-minikube.sh
+./build-api-server-container-image.sh
+
+./install-ironic.sh
+./install-bmo.sh
+
+python create_nodes_v3.py
+
+export CLUSTER_TOPOLOGY=true
+clusterctl init --infrastructure=metal3
+# kubectl apply -f capim-modified.yaml
+# yq -i "select(documentIndex == 0) | .spec.replicas = ${N_APISERVER_PODS}" apiserver-deployments.yaml
+kubectl apply -f apiserver-deployments.yaml
+./generate-certificates.sh
+# Wait for apiserver pod to exist
+sleep 90
+
+./create-cluster.sh
diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/README.md b/Support/Multitenancy/Multiple-Ironic-conductors/README.md
index 3af02cfc..14ddd4e0 100644
--- a/Support/Multitenancy/Multiple-Ironic-conductors/README.md
+++ b/Support/Multitenancy/Multiple-Ironic-conductors/README.md
@@ -135,6 +135,19 @@ Now, if you open another terminal and run `kubectl -n metal3 get BMH --watch`, y
 
 Just like before, all of the steps can be ran at once by running the `./Init-environment-v2.sh` script. This script also respects configuration in `config.sh`.
 
+# Multiple ironics - full setup
+
+With BMO already working, we can now proceed to make the multiple Ironic conductors and fake IPA work with CAPI and CAPM3, i.e. we will aim to "create" clusters with these fake nodes. Since we do not have any nodes to install the k8s apiserver onto, we will attempt to install the apiserver directly on top of the management cluster, building on the research and experiments done by our colleague Lennart Jern, which can be read in full [here](https://github.com/metal3-io/metal3-io.github.io/blob/0592e636bb10b1659437790b38f85cc49c552239/_posts/2023-05-17-Scaling_part_2.md).
+
+In short, for this story to work, you will need to install `kubeadm` and `clusterctl` on your system. To simulate the `etcd` server, we added the script `start_fake_etcd.sh` to the setup.
+
+All the setup steps can be run at once with the script `Init-environment-v3.sh`. After that, each time we run the script `create-cluster.sh`, a new BMH manifest will be applied, and a new 1-node cluster will be created (the node comes, as usual, with 1 KubeadmControlPlane object, 1 `Machine` object, and 1 `Metal3Machine` object).
+
+Compared to Lennart's setup, ours has a couple of differences worth noting:
+- Our BMO doesn't run in test mode. Instead, we use `fake-ipa` to "trick" `ironic` into thinking that it is talking with real nodes.
+- We don't expose the apiservers using the domain `test-kube-apiserver.NAMESPACE.svc.cluster.local` (in fact, we still do, but it doesn't seem to expose anything). Instead, we use the ClusterIP of the apiserver service.
+- We also run into the issue of lacking resources, as the apiservers take up quite a lot of them, so the number of nodes/clusters we can simulate will not be very high. (So far, we have not been able to try running these apiservers on external VMs.) Another way to solve this issue might be to come up with some sort of apiserver simulation, the kind of thing we already did with `fake-ipa`.
+
 # Requirements
 
 This study was conducted on a VM with the following specs:
diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/apiserver-deployments.yaml b/Support/Multitenancy/Multiple-Ironic-conductors/apiserver-deployments.yaml
new file mode 100644
index 00000000..cc141033
--- /dev/null
+++ b/Support/Multitenancy/Multiple-Ironic-conductors/apiserver-deployments.yaml
@@ -0,0 +1,40 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: capim-deployment
+  namespace: default
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: capim
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: capim
+    spec:
+      containers:
+      - image: 172.22.0.1:5000/localimages/capim
+        imagePullPolicy: Always
+        name: capim
+        env:
+        - name: POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        name: apiserver
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: apiserver-service
+spec:
+  selector:
+    app: capim
+  ports:
+  - protocol: TCP
+    port: 3333
+    targetPort: 3333
diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/build-api-server-container-image.sh b/Support/Multitenancy/Multiple-Ironic-conductors/build-api-server-container-image.sh
new file mode 100755
index 00000000..08e3065a
--- /dev/null
+++ b/Support/Multitenancy/Multiple-Ironic-conductors/build-api-server-container-image.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+__dir__=$(realpath $(dirname $0))
+IMAGE_NAME="172.22.0.1:5000/localimages/capim"
+
+if [[ ${1:-""} == "-f" ]]; then
+  sudo podman rmi "${IMAGE_NAME}"
+  kubectl delete -f capim-modified.yaml
+fi
+
+if [[ $(sudo podman images | grep ${IMAGE_NAME}) != "" ]]; then
+  sudo podman push --tls-verify=false "${IMAGE_NAME}"
+  exit 0
+fi
+CAPI_DIR="/tmp/cluster-api"
+if [[ ! -d "${CAPI_DIR}" ]]; then
+  git clone https://github.com/kubernetes-sigs/cluster-api.git "${CAPI_DIR}"
+fi
+
+cd "${CAPI_DIR}"
+
+INMEMORY_DIR="${CAPI_DIR}/test/infrastructure/inmemory"
+
+cp "${__dir__}/main.go" "${INMEMORY_DIR}/main.go"
+
+cd "${INMEMORY_DIR}" || exit
+
+sudo podman build --build-arg=builder_image=docker.io/library/golang:1.20.8 --build-arg=goproxy=https://proxy.golang.org,direct --build-arg=ARCH=amd64 --build-arg=ldflags="-X 'sigs.k8s.io/cluster-api/version.buildDate=2023-10-10T11:47:30Z' -X 'sigs.k8s.io/cluster-api/version.gitCommit=8ba3f47b053da8bbf63cf407c930a2ee10bfd754' -X 'sigs.k8s.io/cluster-api/version.gitTreeState=dirty' -X 'sigs.k8s.io/cluster-api/version.gitMajor=1' -X 'sigs.k8s.io/cluster-api/version.gitMinor=0' -X 'sigs.k8s.io/cluster-api/version.gitVersion=v1.0.0-4041-8ba3f47b053da8-dirty' -X 'sigs.k8s.io/cluster-api/version.gitReleaseCommit=e09ed61cc9ba8bd37b0760291c833b4da744a985'" ../../..
-t "${IMAGE_NAME}" --file Dockerfile + +sudo podman push --tls-verify=false "${IMAGE_NAME}" diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/build-sushy-tools-image.sh b/Support/Multitenancy/Multiple-Ironic-conductors/build-sushy-tools-image.sh index e7459034..b886ee33 100755 --- a/Support/Multitenancy/Multiple-Ironic-conductors/build-sushy-tools-image.sh +++ b/Support/Multitenancy/Multiple-Ironic-conductors/build-sushy-tools-image.sh @@ -1,10 +1,19 @@ #!/bin/bash # -SUSHYTOOLS_DIR="$HOME/sushy-tools" +IMAGE_NAME="127.0.0.1:5000/localimages/sushy-tools" +if [[ ${1:-""} == "-f" ]]; then + sudo podman rmi "${IMAGE_NAME}" +fi + +if [[ $(sudo podman images | grep ${IMAGE_NAME}) != "" ]]; then + sudo podman push --tls-verify=false "${IMAGE_NAME}" + exit 0 +fi +SUSHYTOOLS_DIR="/tmp/sushy-tools" rm -rf "$SUSHYTOOLS_DIR" git clone https://opendev.org/openstack/sushy-tools.git "$SUSHYTOOLS_DIR" cd "$SUSHYTOOLS_DIR" || exit -git fetch https://review.opendev.org/openstack/sushy-tools refs/changes/66/875366/35 && git cherry-pick FETCH_HEAD +git fetch https://review.opendev.org/openstack/sushy-tools refs/changes/66/875366/36 && git cherry-pick FETCH_HEAD pip3 install build python3 -m build @@ -43,5 +52,5 @@ RUN mkdir -p /root/sushy CMD ["sushy-emulator", "-i", "::", "--config", "/root/sushy/conf.py"] EOF -sudo podman build -t 127.0.0.1:5000/localimages/sushy-tools . -rm -rf "$SUSHYTOOLS_DIR" +sudo podman build -t "${IMAGE_NAME}" . +sudo podman push --tls-verify=false "${IMAGE_NAME}" diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/capim-modified.yaml b/Support/Multitenancy/Multiple-Ironic-conductors/capim-modified.yaml new file mode 100644 index 00000000..319b97d8 --- /dev/null +++ b/Support/Multitenancy/Multiple-Ironic-conductors/capim-modified.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: apiserver + labels: + app: manager +spec: + containers: + - image: 172.22.0.1:5000/localimages/capim + imagePullPolicy: Always + name: capim + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + restartPolicy: Always diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/clean.sh b/Support/Multitenancy/Multiple-Ironic-conductors/clean.sh index 6cc4b9f0..81e57e00 100755 --- a/Support/Multitenancy/Multiple-Ironic-conductors/clean.sh +++ b/Support/Multitenancy/Multiple-Ironic-conductors/clean.sh @@ -31,14 +31,18 @@ minikube stop minikube delete --all --purge # Stop and delete containers -containers=("ironic-ipa-downloader" "ironic" "keepalived" "registry" "ironic-client" "fake-ipa" "openstack-client" "httpd-infra") -for i in $(seq 1 "$N_SUSHY"); do - containers+=("sushy-tools-$i") -done -for container in "${containers[@]}"; do - echo "Deleting the container: $container" - sudo podman stop "$container" &>/dev/null - sudo podman rm "$container" &>/dev/null +declare -a running_containers=($(sudo podman ps --all --format json | jq -r '.[].Names[0]')) +echo ${running_containers[0]} +declare -a containers=("ipa-downloader" "ironic" "keepalived" "registry" "ironic-client" "openstack-client" "httpd-infra") + +for container in "${running_containers[@]}"; do + if [[ "${containers[@]}" =~ "${container}" || "${container}" =~ "sushy-tools-"* || "${container}" =~ "fake-ipa-"* ]]; then + echo "Deleting the container: ${container}" + sudo podman stop "$container" &>/dev/null + sudo podman rm "$container" &>/dev/null + fi done -rm -rf macaddrs uuids node.json nodes.json batch.json +rm -rf bmc-*.yaml + +rm -rf macaddrs uuids node.json nodes.json batch.json 
in-memory-development.yaml sushy-tools-conf ironic.env
diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/config.sh b/Support/Multitenancy/Multiple-Ironic-conductors/config.sh
index d8b600d2..ac5c644b 100755
--- a/Support/Multitenancy/Multiple-Ironic-conductors/config.sh
+++ b/Support/Multitenancy/Multiple-Ironic-conductors/config.sh
@@ -2,5 +2,18 @@
 #
 export N_NODES=1000
 export N_SUSHY=30
-# Put the endpoints of different ironics, separated by spaces
-export IRONIC_ENDPOINTS="172.22.0.2 172.22.0.3 172.22.0.4 172.22.0.5"
+export N_FAKE_IPA=6
+export N_IRONICS=11
+export N_APISERVER_PODS=2
+# export N_NODES=50
+# export N_SUSHY=2
+# export N_FAKE_IPA=2
+# export N_IRONICS=3
+
+# Translating N_IRONICS to IRONIC_ENDPOINTS. Don't change this part
+IRONIC_ENDPOINTS="172.22.0.2"
+for i in $(seq 2 $N_IRONICS); do
+  IRONIC_ENDPOINTS="${IRONIC_ENDPOINTS} 172.22.0.$(( i + 1 ))"
+done
+export IRONIC_ENDPOINTS
+
diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/configure-minikube.sh b/Support/Multitenancy/Multiple-Ironic-conductors/configure-minikube.sh
index ab43f8f2..c31b267e 100755
--- a/Support/Multitenancy/Multiple-Ironic-conductors/configure-minikube.sh
+++ b/Support/Multitenancy/Multiple-Ironic-conductors/configure-minikube.sh
@@ -5,7 +5,7 @@ minikube config set memory 4096
 sudo usermod --append --groups libvirt "$(whoami)"
 while /bin/true; do
   minikube_error=0
-  minikube start --insecure-registry 172.22.0.1:5000 || minikube_error=1
+  minikube start --insecure-registry 172.22.0.1:5000 --memory 32000 --cpus 16 || minikube_error=1
   if [[ $minikube_error -eq 0 ]]; then
     break
   fi
diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/create-cluster.sh b/Support/Multitenancy/Multiple-Ironic-conductors/create-cluster.sh
new file mode 100755
index 00000000..bd1deaca
--- /dev/null
+++ b/Support/Multitenancy/Multiple-Ironic-conductors/create-cluster.sh
@@ -0,0 +1,132 @@
+#!/bin/bash
+#
+
+source ./config.sh
+CLUSTER_TEMPLATE=manifests/cluster-template.yaml
+export CLUSTER_APIENDPOINT_PORT="6443"
+export IMAGE_CHECKSUM="97830b21ed272a3d854615beb54cf004"
+export IMAGE_CHECKSUM_TYPE="md5"
+export IMAGE_FORMAT="raw"
+export KUBERNETES_VERSION="v1.26.0"
+export WORKERS_KUBEADM_EXTRA_CONFIG=""
+export WORKER_MACHINE_COUNT="0"
+export NODE_DRAIN_TIMEOUT="60s"
+export CTLPLANE_KUBEADM_EXTRA_CONFIG=""
+
+
+create_cluster() {
+  bmh_index="${1}"
+  cluster="test${bmh_index}"
+  namespace="${cluster}"
+  nodename="${cluster}"
+  fake_ipa_port=$(( 9901 + (( $bmh_index % ${N_FAKE_IPA} )) ))
+
+  export IMAGE_URL="http://192.168.111.1:${fake_ipa_port}/images/rhcos-ootpa-latest.qcow2"
+
+  kubectl port-forward service/apiserver-service 3333:3333 2>/dev/null&
+
+  echo "Creating cluster ${cluster} in namespace ${namespace}"
+  kubectl create namespace "${namespace}"
+  kubectl -n "${namespace}" apply -f bmc-${nodename}.yaml
+
+  caKeyEncoded=$(cat /tmp/ca.key | base64 -w 0)
+  caCertEncoded=$(cat /tmp/ca.crt | base64 -w 0)
+  etcdKeyEncoded=$(cat /tmp/etcd.key | base64 -w 0)
+  etcdCertEncoded=$(cat /tmp/etcd.crt | base64 -w 0)
+
+  cluster_endpoints=$(curl "localhost:3333/register?resource=${namespace}/${cluster}&caKey=${caKeyEncoded}&caCert=${caCertEncoded}&etcdKey=${etcdKeyEncoded}&etcdCert=${etcdCertEncoded}")
+  host=$(echo ${cluster_endpoints} | jq -r ".Host")
+  port=$(echo ${cluster_endpoints} | jq -r ".Port")
+
+  cat <<EOF > "/tmp/${cluster}-ca-secrets.yaml"
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    cluster.x-k8s.io/cluster-name: ${cluster}
+  name: ${cluster}-ca
+  namespace: ${namespace}
+type: kubernetes.io/tls
+data:
+  tls.crt: ${caCertEncoded}
+  tls.key: ${caKeyEncoded}
+EOF
+
+  kubectl -n ${namespace} apply -f /tmp/${cluster}-ca-secrets.yaml
+
+  cat <<EOF > "/tmp/${cluster}-etcd-secrets.yaml"
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    cluster.x-k8s.io/cluster-name: ${cluster}
+  name: ${cluster}-etcd
+  namespace: ${namespace}
+type: kubernetes.io/tls
+data:
+  tls.crt: ${etcdCertEncoded}
+  tls.key: ${etcdKeyEncoded}
+EOF
+
+  kubectl -n ${namespace} apply -f /tmp/${cluster}-etcd-secrets.yaml
+
+  # Generate metal3 cluster
+  export CLUSTER_APIENDPOINT_HOST="${host}"
+  export CLUSTER_APIENDPOINT_PORT="${port}"
+  echo "Generating cluster ${cluster} with clusterctl"
+  clusterctl generate cluster "${cluster}" \
+    --from "${CLUSTER_TEMPLATE}" \
+    --target-namespace "${namespace}" > /tmp/${cluster}-cluster.yaml
+  kubectl apply -f /tmp/${cluster}-cluster.yaml
+
+  sleep 10
+
+  wait_for_resource() {
+    resource=$1
+    jsonpath=${2:-"{.items[0].metadata.name}"}
+    while true; do
+      kubectl -n "${namespace}" get "${resource}" -o jsonpath="${jsonpath}" 2> /dev/null
+      if [ $? -eq 0 ]; then
+        return
+      fi
+      sleep 2
+    done
+  }
+
+  bmh_name=$(wait_for_resource "bmh")
+  metal3machine=$(wait_for_resource "m3m")
+  machine=$(wait_for_resource "machine")
+
+  providerID="metal3://$namespace/$bmh_name/$metal3machine"
+  curl "localhost:3333/updateNode?resource=${namespace}/${cluster}&nodeName=${machine}&providerID=${providerID}"
+  echo "Done generating cluster ${cluster} with clusterctl"
+}
+
+for i in $(seq 1 $N_NODES); do
+  namespace="test${i}"
+  if [[ $(kubectl get ns | grep "${namespace}") != "" ]]; then
+    echo "ERROR: Namespace ${namespace} exists. Skip creating cluster"
+    continue
+  fi
+  create_cluster "${i}"
+done
+
+# Wait for all BMHs to be available. Clusters should be more or less ready by then.
+desired_states=("available" "provisioning" "provisioned")
+for i in $(seq 1 $N_NODES); do
+  namespace="test${i}"
+  bmh_name="$(kubectl -n ${namespace} get bmh -o jsonpath='{.items[0].metadata.name}')"
+  echo "Waiting for BMH ${bmh_name} to become available."
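+  # Note: the BMH may already have moved past "available" (to provisioning or provisioned)
+  # by the time we poll, so any of the states in desired_states counts as done.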
+ while true; do + bmh_state="$(kubectl -n ${namespace} get bmh -o jsonpath='{.items[0].status.provisioning.state}')" + if [[ "${desired_states[@]}" =~ "${bmh_state}" ]]; then + break + fi + sleep 3 + done +done + +# Run describe for all clusters +for i in $(seq 1 $N_NODES); do + clusterctl -n "test${i}" describe cluster "test${i}" +done diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/create_nodes_v3.py b/Support/Multitenancy/Multiple-Ironic-conductors/create_nodes_v3.py new file mode 100644 index 00000000..d98cdff0 --- /dev/null +++ b/Support/Multitenancy/Multiple-Ironic-conductors/create_nodes_v3.py @@ -0,0 +1,60 @@ +import json +import subprocess +import time +import random +import os +from multiprocessing import Pool + +with open("nodes.json") as f: + nodes = json.load(f) + +def generate_random_mac(): + # Generate a random MAC address + mac = [random.randint(0x00, 0xff) for _ in range(6)] + # Set the locally administered address bit (2nd least significant bit of the 1st byte) to 1 + mac[0] |= 0x02 + # Format the MAC address + mac_address = ':'.join('%02x' % b for b in mac) + return mac_address + +def create_node(node): + uuid = node["uuid"] + name = node["name"] + port = 8001 + (int(name.strip("test")) - 1) % int(os.environ.get("N_SUSHY", 10)) + # subprocess.run(["baremetal", "node", "create", "--driver", "redfish", "--driver-info", + # f"redfish_address=http://192.168.111.1:{port}", "--driver-info", + # f"redfish_system_id=/redfish/v1/Systems/{uuid}", "--driver-info", + # "redfish_username=admin", "--driver-info", "redfish_password=password", + # "--uuid", uuid, "--name", name], stdout=subprocess.DEVNULL) + random_mac = generate_random_mac() + manifest = f"""--- +apiVersion: v1 +kind: Secret +metadata: + name: {name}-bmc-secret + labels: + environment.metal3.io: baremetal +type: Opaque +data: + username: YWRtaW4= + password: cGFzc3dvcmQ= +--- +apiVersion: metal3.io/v1alpha1 +kind: BareMetalHost +metadata: + name: {name} +spec: + online: true + bmc: + address: redfish+http://192.168.111.1:{port}/redfish/v1/Systems/{uuid} + credentialsName: {name}-bmc-secret + bootMACAddress: {random_mac} + bootMode: legacy +""" + with open(f"bmc-{name}.yaml", "w") as f: + f.write(manifest) + print(f"Created manifests for node {name}") + +if __name__ == "__main__": + with Pool(100) as p: + conductors = p.map(create_node, nodes) diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/generate-certificates.sh b/Support/Multitenancy/Multiple-Ironic-conductors/generate-certificates.sh new file mode 100755 index 00000000..d8e3e7aa --- /dev/null +++ b/Support/Multitenancy/Multiple-Ironic-conductors/generate-certificates.sh @@ -0,0 +1,6 @@ +#!/bin/bash +# + +openssl req -x509 -subj "/CN=Kubernetes API" -new -newkey rsa:2048 -nodes -keyout "/tmp/ca.key" -sha256 -days 3650 -out "/tmp/ca.crt" + +openssl req -x509 -subj "/CN=ETCD CA" -new -newkey rsa:2048 -nodes -keyout "/tmp/etcd.key" -sha256 -days 3650 -out "/tmp/etcd.crt" diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/generate_unique_nodes.sh b/Support/Multitenancy/Multiple-Ironic-conductors/generate_unique_nodes.sh index e66860bd..4188ace6 100755 --- a/Support/Multitenancy/Multiple-Ironic-conductors/generate_unique_nodes.sh +++ b/Support/Multitenancy/Multiple-Ironic-conductors/generate_unique_nodes.sh @@ -23,7 +23,7 @@ echo '[]' > nodes.json for i in $(seq 1 "${N_NODES:-100}"); do uuid=$(generate_unique uuidgen uuids) macaddr=$(generate_unique macgen macaddrs) - name="fake${i}" + name="test${i}" jq --arg node_name "${name}" \ 
--arg uuid "${uuid}" \ --arg macaddr "${macaddr}" \ diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/get_ironic_logs.sh b/Support/Multitenancy/Multiple-Ironic-conductors/get_ironic_logs.sh new file mode 100755 index 00000000..83ad683b --- /dev/null +++ b/Support/Multitenancy/Multiple-Ironic-conductors/get_ironic_logs.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# +log_dir="ironic_logs" +rm -rf ${log_dir} +mkdir -p ${log_dir} +ns="baremetal-operator-system" +pod_names=($(kubectl -n "${ns}" get pods -o json | jq -r ".items[].metadata.name")) +for name in ${pod_names[@]}; do + containers=($(kubectl -n "${ns}" get pod ${name} -o json | jq -r ".spec.containers[].name")) + for c in ${containers[@]}; do + kubectl -n "${ns}" logs ${name} -c ${c} > "${log_dir}/${name}-${c}-log.txt" + done +done diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/handle-images.sh b/Support/Multitenancy/Multiple-Ironic-conductors/handle-images.sh index b31b661e..c6de3e53 100755 --- a/Support/Multitenancy/Multiple-Ironic-conductors/handle-images.sh +++ b/Support/Multitenancy/Multiple-Ironic-conductors/handle-images.sh @@ -7,7 +7,7 @@ IMAGE_NAMES=( # For now, sushy-tools needs to be compiled locally with https://review.opendev.org/c/openstack/sushy-tools/+/875366 # "quay.io/metal3-io/sushy-tools" "quay.io/metal3-io/ironic-ipa-downloader" - "quay.io/metal3-io/ironic:latest" + "quay.io/metal3-io/ironic" "quay.io/metal3-io/ironic-client" "quay.io/metal3-io/keepalived:v0.2.0" "quay.io/metal3-io/mariadb:latest" @@ -29,14 +29,11 @@ podman pod create -n ironic-pod || true for NAME in "${IMAGE_NAMES[@]}"; do # Pull and tag the image podman pull "$NAME" - podman tag "$NAME" 127.0.0.1:"$REGISTRY_PORT"/localimages/"${NAME##*/}" + podman tag "$NAME" 127.0.0.1:"${REGISTRY_PORT}"/localimages/"${NAME##*/}" # Push the image to the local registry - podman push --tls-verify=false 127.0.0.1:5000/localimages/"${NAME##*/}" + podman push --tls-verify=false 127.0.0.1:"${REGISTRY_PORT}"/localimages/"${NAME##*/}" done -# This image was built earlier, but can only be pushed now, after the network was setup -podman push --tls-verify=false 127.0.0.1:5000/localimages/sushy-tools - __dir__=$(realpath "$(dirname "$0")") "$__dir__/ironic_tls_setup.sh" diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/install-bmo.sh b/Support/Multitenancy/Multiple-Ironic-conductors/install-bmo.sh index 8971c0e6..c37541c6 100755 --- a/Support/Multitenancy/Multiple-Ironic-conductors/install-bmo.sh +++ b/Support/Multitenancy/Multiple-Ironic-conductors/install-bmo.sh @@ -3,24 +3,12 @@ set -e kubectl create ns metal3 -BMOPATH=${BMOPATH:-$HOME/baremetal-operator} +BMOPATH=${BMOPATH:-/tmp/baremetal-operator} rm -rf ${BMOPATH} - git clone https://github.com/Nordix/baremetal-operator.git ${BMOPATH} -cat << EOF >"${BMOPATH}/config/default/ironic.env" -HTTP_PORT=6180 -PROVISIONING_INTERFACE=ironicendpoint -DHCP_RANGE=172.22.0.10,172.22.0.100 -DEPLOY_KERNEL_URL=http://172.22.0.2:6180/images/ironic-python-agent.kernel -DEPLOY_RAMDISK_URL=http://172.22.0.2:6180/images/ironic-python-agent.initramfs -IRONIC_ENDPOINT=https://172.22.0.2:6385/v1/ -IRONIC_INSPECTOR_ENDPOINT=https://172.22.0.2:5050/v1/ -CACHEURL=http://172.22.0.1/images -IRONIC_FAST_TRACK=true -EOF - +cp ironic.env "${BMOPATH}/config/default/ironic.env" kustomize build ${BMOPATH}/config/tls | kubectl apply -f - kubectl -n baremetal-operator-system wait --for=condition=available deployment/baremetal-operator-controller-manager --timeout=300s diff --git 
a/Support/Multitenancy/Multiple-Ironic-conductors/install-ironic.sh b/Support/Multitenancy/Multiple-Ironic-conductors/install-ironic.sh index 15bf8601..b16d8760 100755 --- a/Support/Multitenancy/Multiple-Ironic-conductors/install-ironic.sh +++ b/Support/Multitenancy/Multiple-Ironic-conductors/install-ironic.sh @@ -1,19 +1,18 @@ #!/bin/bash set -e -kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.0/cert-manager.yaml -kubectl -n cert-manager wait --for=condition=available deployment/cert-manager-webhook --timeout=300s -kubectl -n cert-manager wait --for=condition=available deployment/cert-manager-cainjector --timeout=300s -kubectl -n cert-manager wait --for=condition=available deployment/cert-manager --timeout=300s +kubectl -n cert-manager wait --for=condition=available deployment/cert-manager-webhook --timeout=500s +kubectl -n cert-manager wait --for=condition=available deployment/cert-manager-cainjector --timeout=500s +kubectl -n cert-manager wait --for=condition=available deployment/cert-manager --timeout=500s if [[ ! -f ~/.ssh/id_rsa.pub ]]; then ssh-keygen -t ed25519 fi + # Install ironic -# read -ra PROVISIONING_IPS <<< "${IRONIC_ENDPOINTS}" -# helm install ironic ironic --set sshKey="$(cat ~/.ssh/id_rsa.pub)" --set ironicReplicas="{$(echo "$IRONIC_ENDPOINTS" | sed 's/ /\,/g')}" --wait -helm install ironic ironic --set sshKey="$(cat ~/.ssh/id_rsa.pub)" --set ironicReplicas="{${IRONIC_ENDPOINTS// /\,}}" --wait +helm install ironic ironic --set sshKey="$(cat ~/.ssh/id_rsa.pub)" --set ironicReplicas="{${IRONIC_ENDPOINTS// /\,}}" --wait --timeout 20m ironic_client="ironicclient.sh" openstack_dir="${PWD}/_clouds_yaml" diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/ironic/templates/deployments/ironic.yaml b/Support/Multitenancy/Multiple-Ironic-conductors/ironic/templates/deployments/ironic.yaml index d179fe1c..ca1fe77a 100644 --- a/Support/Multitenancy/Multiple-Ironic-conductors/ironic/templates/deployments/ironic.yaml +++ b/Support/Multitenancy/Multiple-Ironic-conductors/ironic/templates/deployments/ironic.yaml @@ -139,18 +139,20 @@ spec: - mountPath: /shared name: ironic-data-volume hostNetwork: true - initContainers: - - command: - - /usr/local/bin/get-resource.sh - envFrom: - - configMapRef: - name: baremetal-operator-ironic-bmo-configmap - image: 172.22.0.1:5000/localimages/ironic-ipa-downloader - imagePullPolicy: Always - name: ironic-ipa-downloader - volumeMounts: - - mountPath: /shared - name: ironic-data-volume + # This was removed thanks to a hack in hostPath. Needs reconsideration if taken to + # production. 
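+  # The IPA kernel/ramdisk are instead copied into the minikube VM under /shared/html/images
+  # by start-minikube.sh; the hostPath volume below exposes that directory to the ironic pod.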
+ # initContainers: + # - command: + # - touch /shared/ironic-python-agent.kernel /shared/ironic-python-agent.initramfs + # envFrom: + # - configMapRef: + # name: baremetal-operator-ironic-bmo-configmap + # image: ubuntu + # imagePullPolicy: Always + # name: fake-ironic-ipa-downloader + # volumeMounts: + # - mountPath: /shared + # name: ironic-data-volume volumes: - name: cert-mariadb secret: @@ -176,6 +178,8 @@ spec: - name: cert-ironic-inspector secret: secretName: ironic-inspector-cert - - emptyDir: {} - name: ironic-data-volume + - name: ironic-data-volume + hostPath: + path: /shared + type: Directory {{- end }} diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/kube-controller-manager-pod.yaml b/Support/Multitenancy/Multiple-Ironic-conductors/kube-controller-manager-pod.yaml new file mode 100644 index 00000000..62ad653e --- /dev/null +++ b/Support/Multitenancy/Multiple-Ironic-conductors/kube-controller-manager-pod.yaml @@ -0,0 +1,131 @@ +apiVersion: v1 +kind: Pod +metadata: + annotations: + kubernetes.io/config.hash: aaff90ec64f346d418f0a93d766752c5 + kubernetes.io/config.mirror: aaff90ec64f346d418f0a93d766752c5 + kubernetes.io/config.seen: "2023-01-23T07:16:28.924430094Z" + kubernetes.io/config.source: file + labels: + component: kube-controller-manager + tier: control-plane + name: kube-controller-manager-node-name + namespace: kube-system +spec: + containers: + - command: + - kube-controller-manager + - --allocate-node-cidrs=true + - --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf + - --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf + - --bind-address=127.0.0.1 + - --client-ca-file=/etc/kubernetes/pki/ca.crt + - --cluster-cidr=10.244.0.0/16 + - --cluster-name=kind + - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt + - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key + - --controllers=*,bootstrapsigner,tokencleaner + - --enable-hostpath-provisioner=true + - --kubeconfig=/etc/kubernetes/controller-manager.conf + - --leader-elect=true + - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt + - --root-ca-file=/etc/kubernetes/pki/ca.crt + - --service-account-private-key-file=/etc/kubernetes/pki/sa.key + - --service-cluster-ip-range=10.96.0.0/16 + - --use-service-account-credentials=true + image: registry.k8s.io/kube-controller-manager:v1.26.0 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 8 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10257 + scheme: HTTPS + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 15 + name: kube-controller-manager + resources: + requests: + cpu: 200m + startupProbe: + failureThreshold: 24 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10257 + scheme: HTTPS + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 15 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/ssl/certs + name: ca-certs + readOnly: true + - mountPath: /etc/ca-certificates + name: etc-ca-certificates + readOnly: true + - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + name: flexvolume-dir + - mountPath: /etc/kubernetes/pki + name: k8s-certs + readOnly: true + - mountPath: /etc/kubernetes/controller-manager.conf + name: kubeconfig + readOnly: true + - mountPath: /usr/local/share/ca-certificates + name: usr-local-share-ca-certificates + readOnly: true + - mountPath: /usr/share/ca-certificates + name: usr-share-ca-certificates + 
readOnly: true + dnsPolicy: ClusterFirst + enableServiceLinks: true + hostNetwork: true + nodeName: node-name + preemptionPolicy: PreemptLowerPriority + priority: 2000001000 + priorityClassName: system-node-critical + restartPolicy: Always + schedulerName: default-scheduler + securityContext: + seccompProfile: + type: RuntimeDefault + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoExecute + operator: Exists + volumes: + - hostPath: + path: /etc/ssl/certs + type: DirectoryOrCreate + name: ca-certs + - hostPath: + path: /etc/ca-certificates + type: DirectoryOrCreate + name: etc-ca-certificates + - hostPath: + path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + type: DirectoryOrCreate + name: flexvolume-dir + - hostPath: + path: /etc/kubernetes/pki + type: DirectoryOrCreate + name: k8s-certs + - hostPath: + path: /etc/kubernetes/controller-manager.conf + type: FileOrCreate + name: kubeconfig + - hostPath: + path: /usr/local/share/ca-certificates + type: DirectoryOrCreate + name: usr-local-share-ca-certificates + - hostPath: + path: /usr/share/ca-certificates + type: DirectoryOrCreate + name: usr-share-ca-certificates diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/main.go b/Support/Multitenancy/Multiple-Ironic-conductors/main.go new file mode 100644 index 00000000..07397f63 --- /dev/null +++ b/Support/Multitenancy/Multiple-Ironic-conductors/main.go @@ -0,0 +1,271 @@ +package main + +import ( + "context" + "crypto/rsa" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1" + cloudv1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/api/v1alpha1" + + "encoding/base64" + "encoding/json" + + "fmt" + "io" + "net/http" + "os" + + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/runtime" + cmanager "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/manager" + "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/server" + "sigs.k8s.io/cluster-api/util/certs" + ctrl "sigs.k8s.io/controller-runtime" +) + +var ( + cloudScheme = runtime.NewScheme() + scheme = runtime.NewScheme() + cloudMgr = cmanager.New(cloudScheme) + apiServerMux = &server.WorkloadClustersMux{} + key *rsa.PrivateKey + ctx = context.Background() +) + +func init() { + // scheme used for operating on the management cluster. + _ = clientgoscheme.AddToScheme(scheme) + _ = clusterv1.AddToScheme(scheme) + _ = infrav1.AddToScheme(scheme) + + // scheme used for operating on the cloud resource. 
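+	// corev1 and rbacv1 in particular are needed because the handlers below create Node,
+	// ClusterRole/ClusterRoleBinding and ConfigMap objects inside the fake workload cluster.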
+ _ = cloudv1.AddToScheme(cloudScheme) + _ = corev1.AddToScheme(cloudScheme) + _ = appsv1.AddToScheme(cloudScheme) + _ = rbacv1.AddToScheme(cloudScheme) +} + +type ResourceData struct { + ResourceName string + Host string + Port int +} + +func register(w http.ResponseWriter, r *http.Request) { + resourceName := r.URL.Query().Get("resource") + resp := &ResourceData{} + resp.ResourceName = resourceName + cloudMgr.AddResourceGroup(resourceName) + listener, err := apiServerMux.InitWorkloadClusterListener(resourceName) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + caKeyEncoded := r.URL.Query().Get("caKey") + caKeyRaw, err := base64.StdEncoding.DecodeString(caKeyEncoded) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + caCertEncoded := r.URL.Query().Get("caCert") + caCertRaw, err := base64.StdEncoding.DecodeString(caCertEncoded) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + + caCert, err := certs.DecodeCertPEM(caCertRaw) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + caKey, err := certs.DecodePrivateKeyPEM(caKeyRaw) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + + apiServerPod1 := "kube-apiserver-1" + err = apiServerMux.AddAPIServer(resourceName, apiServerPod1, caCert, caKey.(*rsa.PrivateKey)) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + etcdKeyEncoded := r.URL.Query().Get("etcdKey") + etcdKeyRaw, err := base64.StdEncoding.DecodeString(etcdKeyEncoded) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + + etcdCertEncoded := r.URL.Query().Get("etcdCert") + etcdCertRaw, err := base64.StdEncoding.DecodeString(etcdCertEncoded) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + // + etcdCert, err := certs.DecodeCertPEM(etcdCertRaw) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + etcdKey, err := certs.DecodePrivateKeyPEM(etcdKeyRaw) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + if etcdKey == nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + // + etcdPodMember1 := "etcd-1" + err = apiServerMux.AddEtcdMember(resourceName, etcdPodMember1, etcdCert, etcdKey.(*rsa.PrivateKey)) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + + resp.Host = listener.Host() + resp.Port = listener.Port() + data, _ := json.Marshal(resp) + io.WriteString(w, string(data)) + + c, err := listener.GetClient() + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + ctx := context.Background() + role := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubeadm:get-nodes", + }, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"get"}, + APIGroups: []string{""}, + Resources: []string{"nodes"}, + }, + }, + } + err = c.Create(ctx, role) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + roleBinding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubeadm:get-nodes", + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: "kubeadm:get-nodes", + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.GroupKind, + Name: "system:bootstrappers:kubeadm:default-node-token", + }, + }, + } + err = c.Create(ctx, roleBinding) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + // create 
kubeadm config map + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubeadm-config", + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + "ClusterConfiguration": "", + }, + } + err = c.Create(ctx, cm) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } + +} + +func updateNode(w http.ResponseWriter, r *http.Request) { + resourceName := r.URL.Query().Get("resource") + listener := cloudMgr.GetResourceGroup(resourceName) + nodeName := r.URL.Query().Get("nodeName") + providerID := r.URL.Query().Get("providerID") + timeOutput := metav1.Now() + + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Labels: map[string]string{ + "node-role.kubernetes.io/control-plane": "", + }, + }, + Spec: corev1.NodeSpec{ + ProviderID: providerID, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + { + LastHeartbeatTime: timeOutput, + LastTransitionTime: timeOutput, + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, + }, + { + LastHeartbeatTime: timeOutput, + LastTransitionTime: timeOutput, + Type: corev1.NodeMemoryPressure, + Status: corev1.ConditionFalse, + Message: "kubelet has sufficient memory available", + Reason: "KubeletHasSufficientMemory", + }, + { + LastHeartbeatTime: timeOutput, + LastTransitionTime: timeOutput, + Message: "kubelet has no disk pressure", + Reason: "KubeletHasNoDiskPressure", + Status: corev1.ConditionFalse, + Type: corev1.NodeDiskPressure, + }, + { + LastHeartbeatTime: timeOutput, + LastTransitionTime: timeOutput, + Message: "kubelet has sufficient PID available", + Reason: "KubeletHasSufficientPID", + Status: corev1.ConditionFalse, + Type: corev1.NodePIDPressure, + }, + { + LastHeartbeatTime: timeOutput, + LastTransitionTime: timeOutput, + Message: "kubelet is posting ready status", + Reason: "KubeletReady", + Status: corev1.ConditionTrue, + Type: corev1.NodeReady, + }, + }, + NodeInfo: corev1.NodeSystemInfo{ + Architecture: "amd64", + BootID: "a4254236-e1e3-4462-97ed-4a25b8b29884", + OperatingSystem: "linux", + SystemUUID: "1ce97e94-730c-42b7-98da-f7dcc0b58e93", + }, + }, + } + c := listener.GetClient() + err := c.Create(ctx, node) + if err != nil { + io.WriteString(w, fmt.Sprintf("Error: %s", err.Error())) + } +} + +func main() { + ctrl.SetLogger(klog.Background()) + podIP := os.Getenv("POD_IP") + key, _ = certs.NewPrivateKey() + apiServerMux, _ = server.NewWorkloadClustersMux(cloudMgr, podIP) + http.HandleFunc("/register", register) + http.HandleFunc("/updateNode", updateNode) + err := http.ListenAndServe(":3333", nil) + if err != nil { + fmt.Printf("Error: %s", err.Error()) + } +} diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/manifests/cluster-template.yaml b/Support/Multitenancy/Multiple-Ironic-conductors/manifests/cluster-template.yaml new file mode 100644 index 00000000..d3ade0df --- /dev/null +++ b/Support/Multitenancy/Multiple-Ironic-conductors/manifests/cluster-template.yaml @@ -0,0 +1,177 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + clusterNetwork: + services: + cidrBlocks: + - 10.96.0.0/12 + pods: + cidrBlocks: + - 192.168.0.0/18 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: Metal3Cluster + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +--- +apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta1 +kind: Metal3Cluster +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + controlPlaneEndpoint: + host: ${CLUSTER_APIENDPOINT_HOST} + port: ${CLUSTER_APIENDPOINT_PORT} + noCloudProvider: true +--- +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: Metal3MachineTemplate + name: ${CLUSTER_NAME}-controlplane + namespace: ${NAMESPACE} + nodeDrainTimeout: ${NODE_DRAIN_TIMEOUT:-"0s"} + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + rolloutStrategy: + rollingUpdate: + maxSurge: 1 + type: RollingUpdate + version: ${KUBERNETES_VERSION} + kubeadmConfigSpec: + joinConfiguration: + controlPlane: {} + nodeRegistration: + name: '{{ ds.meta_data.name }}' + initConfiguration: + nodeRegistration: + name: '{{ ds.meta_data.name }}' +${CTLPLANE_KUBEADM_EXTRA_CONFIG} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: Metal3MachineTemplate +metadata: + name: ${CLUSTER_NAME}-controlplane + namespace: ${NAMESPACE} +spec: + nodeReuse: false + template: + spec: + automatedCleaningMode: metadata + dataTemplate: + name: ${CLUSTER_NAME}-controlplane-template + image: + checksum: ${IMAGE_CHECKSUM} + checksumType: ${IMAGE_CHECKSUM_TYPE} + format: ${IMAGE_FORMAT} + url: ${IMAGE_URL} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + nodepool: nodepool-0 +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + nodepool: nodepool-0 + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + nodepool: nodepool-0 + spec: + nodeDrainTimeout: ${NODE_DRAIN_TIMEOUT:-"0s"} + clusterName: ${CLUSTER_NAME} + version: ${KUBERNETES_VERSION} + bootstrap: + configRef: + name: ${CLUSTER_NAME}-workers + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + infrastructureRef: + name: ${CLUSTER_NAME}-workers + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: Metal3MachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: Metal3MachineTemplate +metadata: + name: ${CLUSTER_NAME}-workers + namespace: ${NAMESPACE} +spec: + nodeReuse: false + template: + spec: + automatedCleaningMode: metadata + dataTemplate: + name: ${CLUSTER_NAME}-workers-template + image: + checksum: ${IMAGE_CHECKSUM} + checksumType: ${IMAGE_CHECKSUM_TYPE} + format: ${IMAGE_FORMAT} + url: ${IMAGE_URL} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-controlplane + namespace: ${NAMESPACE} +spec: + template: + spec: + initConfiguration: + nodeRegistration: + name: '{{ ds.meta_data.name }}' + joinConfiguration: + nodeRegistration: + name: '{{ ds.meta_data.name }}' +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-workers + namespace: ${NAMESPACE} +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + name: '{{ ds.meta_data.name }}' +${WORKERS_KUBEADM_EXTRA_CONFIG} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: Metal3DataTemplate +metadata: + name: ${CLUSTER_NAME}-controlplane-template + namespace: ${NAMESPACE} +spec: + clusterName: 
${CLUSTER_NAME} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: Metal3DataTemplate +metadata: + name: ${CLUSTER_NAME}-workers-template + namespace: ${NAMESPACE} +spec: + clusterName: ${CLUSTER_NAME} diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/manifests/etcd.yaml b/Support/Multitenancy/Multiple-Ironic-conductors/manifests/etcd.yaml new file mode 100644 index 00000000..5449036c --- /dev/null +++ b/Support/Multitenancy/Multiple-Ironic-conductors/manifests/etcd.yaml @@ -0,0 +1,120 @@ +apiVersion: v1 +kind: Service +metadata: + name: etcd-server + labels: + app: etcd +spec: + ports: + - port: 2379 + name: client + selector: + app: etcd +--- +apiVersion: v1 +kind: Service +metadata: + name: etcd + labels: + app: etcd +spec: + ports: + - port: 2379 + name: client + - port: 2380 + name: peer + clusterIP: None + selector: + app: etcd +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: etcd + name: etcd +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app: etcd + serviceName: etcd + template: + metadata: + labels: + app: etcd + name: etcd + spec: + containers: + - command: + - etcd + - --data-dir=/var/run/etcd + - --name=$(POD_NAME) + - --initial-cluster-state=new + - --initial-cluster=etcd-0=https://etcd-0.etcd.$(POD_NAMESPACE).svc.cluster.local:2380 + - --initial-advertise-peer-urls=https://$(POD_NAME).etcd.$(POD_NAMESPACE).svc.cluster.local:2380 + - --initial-cluster-token=etcd-cluster + - --listen-client-urls=https://0.0.0.0:2379 + - --advertise-client-urls=https://etcd-0.etcd.$(POD_NAMESPACE).svc.cluster.local:2379,https://etcd-server.$(POD_NAMESPACE).svc.cluster.local:2379 + - --client-cert-auth=true + - --trusted-ca-file=/etc/kubernetes/pki/ca/tls.crt + - --cert-file=/etc/kubernetes/pki/etcd/tls.crt + - --key-file=/etc/kubernetes/pki/etcd/tls.key + - --listen-peer-urls=https://0.0.0.0:2380 + - --peer-client-cert-auth=true + - --peer-trusted-ca-file=/etc/kubernetes/pki/ca/tls.crt + - --peer-cert-file=/etc/kubernetes/pki/etcd-peer/tls.crt + - --peer-key-file=/etc/kubernetes/pki/etcd-peer/tls.key + - --snapshot-count=8000 + - --auto-compaction-mode=periodic + - --auto-compaction-retention=5m + - --quota-backend-bytes=8589934592 + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + image: registry.k8s.io/etcd:3.5.4-0 + imagePullPolicy: IfNotPresent + name: etcd + ports: + - containerPort: 2379 + name: client + protocol: TCP + - containerPort: 2380 + name: peer + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + # - mountPath: /var/run/etcd + # name: data + - mountPath: /etc/kubernetes/pki/etcd + name: etcd-certs + readOnly: true + - mountPath: /etc/kubernetes/pki/ca/tls.crt + name: ca + subPath: tls.crt + readOnly: true + - mountPath: /etc/kubernetes/pki/etcd-peer + name: etcd-peer + readOnly: true + terminationGracePeriodSeconds: 30 + volumes: + - name: etcd-certs + secret: + secretName: etcd-server + - name: ca + secret: + secretName: CLUSTER-etcd + - name: etcd-peer + secret: + secretName: etcd-peer diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/manifests/kube-apiserver-deployment.yaml b/Support/Multitenancy/Multiple-Ironic-conductors/manifests/kube-apiserver-deployment.yaml new file mode 100644 index 00000000..3ed4390a --- /dev/null +++ 
b/Support/Multitenancy/Multiple-Ironic-conductors/manifests/kube-apiserver-deployment.yaml @@ -0,0 +1,103 @@ +apiVersion: v1 +kind: Service +metadata: + name: test-kube-apiserver + labels: + app: test-kube-apiserver +spec: + ports: + - port: 6443 + name: https + selector: + app: test-kube-apiserver +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: test-kube-apiserver + name: test-kube-apiserver +spec: + replicas: 1 + selector: + matchLabels: + app: test-kube-apiserver + strategy: {} + template: + metadata: + labels: + app: test-kube-apiserver + spec: + containers: + - image: registry.k8s.io/kube-apiserver:v1.26.0 + name: kube-apiserver + ports: + - containerPort: 6443 + name: https + protocol: TCP + command: + - kube-apiserver + - --allow-privileged=true + - --authorization-mode=Node,RBAC + - --client-ca-file=/etc/kubernetes/pki/ca/tls.crt + # - --enable-admission-plugins=NodeRestriction + - --enable-bootstrap-token-auth=true + - --etcd-cafile=/etc/kubernetes/pki/etcd-ca/tls.crt + - --etcd-certfile=/etc/kubernetes/pki/etcd/tls.crt + - --etcd-keyfile=/etc/kubernetes/pki/etcd/tls.key + - --etcd-servers=https://etcd-server.etcd-system.svc.cluster.local:2379 + - --etcd-prefix=/CLUSTER + # - --runtime-config= + - --service-account-issuer=https://kubernetes.default.svc.cluster.local + - --service-account-key-file=/etc/kubernetes/pki/sa/tls.crt + - --service-account-signing-key-file=/etc/kubernetes/pki/sa/tls.key + - --service-cluster-ip-range=10.96.0.0/16 + - --tls-cert-file=/etc/kubernetes/pki/k8s/tls.crt + - --tls-private-key-file=/etc/kubernetes/pki/k8s/tls.key + - --etcd-compaction-interval=0 + # Attempt at reducing memory usage below + - --profiling=false + - --watch-cache=false + - --etcd-count-metric-poll-period=0 + - --etcd-db-metric-poll-interval=0 + - --enable-garbage-collector=false + - --enable-priority-and-fairness=false + - --runtime-config=api/alpha=false + # Enable AlwaysAdmit and disable everything else + - --enable-admission-plugins=AlwaysAdmit + - --disable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,TaintNodesByCondition,PodSecurity,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,CertificateSubjectRestriction,DefaultIngressClass,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + resources: {} + volumeMounts: + - mountPath: /etc/kubernetes/pki/etcd-ca + name: etcd-certs + readOnly: true + - mountPath: /etc/kubernetes/pki/etcd + name: etcd-client + readOnly: true + - mountPath: /etc/kubernetes/pki/ca/tls.crt + name: ca + subPath: tls.crt + readOnly: true + - mountPath: /etc/kubernetes/pki/sa + name: sa-certs + readOnly: true + - mountPath: /etc/kubernetes/pki/k8s + name: k8s-certs + readOnly: true + volumes: + - name: etcd-certs + secret: + secretName: CLUSTER-etcd + - name: etcd-client + secret: + secretName: CLUSTER-apiserver-etcd-client + - name: ca + secret: + secretName: CLUSTER-ca + - name: sa-certs + secret: + secretName: CLUSTER-sa + - name: k8s-certs + secret: + secretName: apiserver +status: {} diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/manifests/kubeadm-config-template.yaml b/Support/Multitenancy/Multiple-Ironic-conductors/manifests/kubeadm-config-template.yaml new file mode 100644 index 00000000..d21e66ba --- /dev/null +++ b/Support/Multitenancy/Multiple-Ironic-conductors/manifests/kubeadm-config-template.yaml @@ -0,0 +1,20 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +kind: 
ClusterConfiguration +apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + - test-kube-apiserver.NAMESPACE.svc.cluster.local + - APISERVER +clusterName: CLUSTER +controlPlaneEndpoint: test-kube-apiserver.NAMESPACE.svc.cluster.local:6443 +etcd: + local: + # https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/#kubeadm-k8s-io-v1beta3-LocalEtcd + serverCertSANs: + - etcd-server.etcd-system.svc.cluster.local + peerCertSANs: + - etcd-0.etcd.etcd-system.svc.cluster.local +kubernetesVersion: v1.26.0 +certificatesDir: /tmp/CLUSTER/pki diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/manifests/kubeadm-config.yaml b/Support/Multitenancy/Multiple-Ironic-conductors/manifests/kubeadm-config.yaml new file mode 100644 index 00000000..e44edce5 --- /dev/null +++ b/Support/Multitenancy/Multiple-Ironic-conductors/manifests/kubeadm-config.yaml @@ -0,0 +1,19 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +kind: ClusterConfiguration +apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + - test-kube-apiserver.NAMESPACE.svc.cluster.local +clusterName: test +controlPlaneEndpoint: test-kube-apiserver.NAMESPACE.svc.cluster.local:6443 +etcd: + local: + # https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/#kubeadm-k8s-io-v1beta3-LocalEtcd + serverCertSANs: + - etcd-server.etcd-system.svc.cluster.local + peerCertSANs: + - etcd-0.etcd.etcd-system.svc.cluster.local +kubernetesVersion: v1.26.0 +certificatesDir: /tmp/CLUSTER/pki diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/start-minikube.sh b/Support/Multitenancy/Multiple-Ironic-conductors/start-minikube.sh index 7e0b2ccf..69d51848 100755 --- a/Support/Multitenancy/Multiple-Ironic-conductors/start-minikube.sh +++ b/Support/Multitenancy/Multiple-Ironic-conductors/start-minikube.sh @@ -2,24 +2,39 @@ set -e # Start Minikube with insecure registry flag -minikube start --insecure-registry 172.22.0.1:5000 +minikube start --insecure-registry 172.22.0.1:5000 --memory 32000 --cpus 16 # SSH into the Minikube VM and execute the following commands sudo su -l -c "minikube ssh sudo brctl addbr ironicendpoint" "${USER}" sudo su -l -c "minikube ssh sudo ip link set ironicendpoint up" "${USER}" sudo su -l -c "minikube ssh sudo brctl addif ironicendpoint eth2" "${USER}" +IRONIC_DATA_DIR="/opt/metal3-dev-env/ironic/" + +minikube ssh "sudo mkdir -p /shared/html/images" +minikube cp ${IRONIC_DATA_DIR}/html/images/ironic-python-agent.kernel /shared/html/images/ +minikube cp ${IRONIC_DATA_DIR}/html/images/ironic-python-agent.initramfs /shared/html/images/ +minikube cp ${IRONIC_DATA_DIR}/html/images/ironic-python-agent.headers /shared/html/images/ + read -ra PROVISIONING_IPS <<< "${IRONIC_ENDPOINTS}" for PROVISIONING_IP in "${PROVISIONING_IPS[@]}"; do sudo su -l -c "minikube ssh sudo ip addr add ${PROVISIONING_IP}/24 dev ironicendpoint" "${USER}" done -# Firewall rules -for i in 8000 80 9999 6385 5050 6180 53 5000; do sudo firewall-cmd --zone=public --add-port=${i}/tcp; done -for i in 69 547 546 68 67 5353 6230 6231 6232 6233 6234 6235; do sudo firewall-cmd --zone=libvirt --add-port=${i}/udp; done - +ports=(8000 80 6385 5050 6180 53 5000 69 547 546 68 67 5353 6230) for i in $(seq 1 "${N_SUSHY:-1}"); do port=$(( 8000 + i )) - sudo firewall-cmd --zone=public --add-port=$port/tcp - sudo firewall-cmd --zone=libvirt --add-port=$port/tcp + ports+=(${port}) +done +for i in $(seq 1 "${N_FAKE_IPA:-1}"); do + port=$(( 9900 + i )) + ports+=(${port}) +done + +# Firewall rules +for i in "${ports[@]}"; do + sudo firewall-cmd 
--zone=public --add-port=${i}/tcp + sudo firewall-cmd --zone=public --add-port=${i}/udp + sudo firewall-cmd --zone=libvirt --add-port=${i}/tcp + sudo firewall-cmd --zone=libvirt --add-port=${i}/udp done diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/start_containers.sh b/Support/Multitenancy/Multiple-Ironic-conductors/start_containers.sh index d00d908f..7781e64c 100755 --- a/Support/Multitenancy/Multiple-Ironic-conductors/start_containers.sh +++ b/Support/Multitenancy/Multiple-Ironic-conductors/start_containers.sh @@ -5,15 +5,16 @@ SUSHY_CONF_DIR="${__dir__}/sushy-tools-conf" SUSHY_TOOLS_IMAGE="127.0.0.1:5000/localimages/sushy-tools" LIBVIRT_URI="qemu+ssh://root@192.168.111.1/system?&keyfile=/root/ssh/id_rsa_virt_power&no_verify=1&no_tty=1" ADVERTISE_HOST="192.168.111.1" -ADVERTISE_PORT="9999" API_URL="https://172.22.0.2:6385" CALLBACK_URL="https://172.22.0.2:5050/v1/continue" -rm -rf "$SUSHY_CONF_DIR" mkdir -p "$SUSHY_CONF_DIR" +rm -rf "$SUSHY_CONF_DIR" +mkdir -p "$SUSHY_CONF_DIR" mkdir -p "$SUSHY_CONF_DIR/ssh" +sudo mkdir -p /root/.ssh sudo ssh-keygen -f /root/.ssh/id_rsa_virt_power -P "" -q -y sudo cat /root/.ssh/id_rsa_virt_power.pub | sudo tee /root/.ssh/authorized_keys @@ -21,6 +22,7 @@ echo "Starting sushy-tools containers" # Start sushy-tools for i in $(seq 1 "$N_SUSHY"); do container_conf_dir="$SUSHY_CONF_DIR/sushy-$i" + fake_ipa_port=$(( 9901 + (( $i % ${N_FAKE_IPA} )) )) mkdir -p "${container_conf_dir}" cat <<'EOF' > "${container_conf_dir}"/htpasswd admin:$2b$12$/dVOBNatORwKpF.ss99KB.vESjfyONOxyH.UgRwNyZi1Xs/W2pGVS @@ -38,20 +40,50 @@ SUSHY_EMULATOR_AUTH_FILE = "/root/sushy/htpasswd" SUSHY_EMULATOR_FAKE_DRIVER = True SUSHY_EMULATOR_LISTEN_PORT = $(( 8000 + i )) FAKE_IPA_API_URL = "${API_URL}" +FAKE_IPA_URL = "http://${ADVERTISE_HOST}:${fake_ipa_port}" FAKE_IPA_INSPECTION_CALLBACK_URL = "${CALLBACK_URL}" -FAKE_IPA_ADVERTISE_ADDRESS = Host(hostname="${ADVERTISE_HOST}", port="${ADVERTISE_PORT}") +FAKE_IPA_ADVERTISE_ADDRESS = Host(hostname="${ADVERTISE_HOST}", port="${fake_ipa_port}") +SUSHY_FAKE_IPA_LISTEN_IP = "${ADVERTISE_HOST}" +SUSHY_FAKE_IPA_LISTEN_PORT = "${fake_ipa_port}" +SUSHY_EMULATOR_FAKE_IPA = True SUSHY_EMULATOR_FAKE_SYSTEMS = $(cat nodes.json) EOF sudo podman run -d --net host --name "sushy-tools-${i}" --pod infra-pod \ - -v "$container_conf_dir:/root/sushy" \ + -v "${container_conf_dir}":/root/sushy \ -v /root/.ssh:/root/ssh \ "${SUSHY_TOOLS_IMAGE}" done -# Start fake-ipa +# Start fake-ipas +for i in $(seq 1 ${N_FAKE_IPA:-1}); do sudo podman run --entrypoint='["sushy-fake-ipa", "--config", "/root/sushy/conf.py"]' \ - -d --net host --name fake-ipa --pod infra-pod \ - -v "$SUSHY_CONF_DIR/sushy-1":/root/sushy \ + -d --net host --name fake-ipa-${i} --pod infra-pod \ + -v "$SUSHY_CONF_DIR/sushy-${i}":/root/sushy \ -v /root/.ssh:/root/ssh \ "${SUSHY_TOOLS_IMAGE}" + + done + +# Download ipa image +cat << EOF >"ironic.env" +HTTP_PORT=6180 +PROVISIONING_INTERFACE=ironicendpoint +DHCP_RANGE=172.22.0.10,172.22.0.100 +DEPLOY_KERNEL_URL=http://172.22.0.2:6180/images/ironic-python-agent.kernel +DEPLOY_RAMDISK_URL=http://172.22.0.2:6180/images/ironic-python-agent.initramfs +IRONIC_ENDPOINT=https://172.22.0.2:6385/v1/ +IRONIC_INSPECTOR_ENDPOINT=https://172.22.0.2:5050/v1/ +CACHEURL=http://172.22.0.1/images +IRONIC_FAST_TRACK=true +EOF + +IRONIC_DATA_DIR="/opt/metal3-dev-env/ironic/" +IPA_DOWNLOADER_IMAGE="quay.io/metal3-io/ironic-ipa-downloader" +mkdir -p "${IRONIC_DATA_DIR}" + +sudo podman run -d --net host --privileged --name ipa-downloader \ + --env-file ironic.env 
\ + -v "${IRONIC_DATA_DIR}:/shared" "${IPA_DOWNLOADER_IMAGE}" /usr/local/bin/get-resource.sh + +export IRONIC_DATA_DIR diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/ubuntu.yaml b/Support/Multitenancy/Multiple-Ironic-conductors/ubuntu.yaml new file mode 100644 index 00000000..0a1e2467 --- /dev/null +++ b/Support/Multitenancy/Multiple-Ironic-conductors/ubuntu.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: ubuntu + labels: + app: ubuntu +spec: + containers: + - image: ubuntu + command: + - "sleep" + - "604800" + imagePullPolicy: IfNotPresent + name: ubuntu + restartPolicy: Always diff --git a/Support/Multitenancy/Multiple-Ironic-conductors/vm-setup.sh b/Support/Multitenancy/Multiple-Ironic-conductors/vm-setup.sh index 8e4e5891..35261b40 100755 --- a/Support/Multitenancy/Multiple-Ironic-conductors/vm-setup.sh +++ b/Support/Multitenancy/Multiple-Ironic-conductors/vm-setup.sh @@ -93,17 +93,51 @@ nmcli con load /etc/NetworkManager/system-connections/baremetal.nmconnection nmcli con up baremetal # install minikube -curl -LO https://storage.googleapis.com/minikube/releases/v1.25.2/minikube-linux-amd64 -install minikube-linux-amd64 /usr/local/bin/minikube +if [[ $(ls /usr/local/bin/minikube) == "" ]]; then + curl -LO https://storage.googleapis.com/minikube/releases/v1.31.0/minikube-linux-amd64 + install minikube-linux-amd64 /usr/local/bin/minikube +fi # Install kubectl -curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" -install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl +if [[ $(ls /usr/local/bin/kubectl) == "" ]]; then + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl +fi # Install Helm -helm_api="https://api.github.com/repos/helm/helm/releases" -helm_release_tag="$(curl -sL "${helm_api}" | jq -r ".[].tag_name" | head -n 1 )" -filename="helm-${helm_release_tag}-linux-amd64.tar.gz" -wget -O "$filename.tar.gz" "https://get.helm.sh/${filename}" -tar -xf "$filename.tar.gz" -install -o root -g root -m 0755 linux-amd64/helm /usr/local/bin/helm -rm -rf "$filename.tar.gz" linux-amd64 minikube-linux-amd64 kubectl +if [[ $(ls /usr/local/bin/helm) == "" ]]; then + helm_api="https://api.github.com/repos/helm/helm/releases" + helm_release_tag="$(curl -sL "${helm_api}" | jq -r ".[].tag_name" | head -n 1 )" + helm_filename="helm-${helm_release_tag}-linux-amd64.tar.gz" + wget -O "$helm_filename" "https://get.helm.sh/${helm_filename}" + tar -xf "$helm_filename" + install -o root -g root -m 0755 linux-amd64/helm /usr/local/bin/helm +fi + +# Install kustomize +if [[ $(ls /usr/local/bin/kustomize) == "" ]]; then + kustomize_api="https://api.github.com/repos/kubernetes-sigs/kustomize/releases" + kustomize_release_tag="$(curl -sL "${kustomize_api}" | jq -r ".[].tag_name" | grep "kustomize" | head -n 1 )" + kustomize_filename="$(echo ${kustomize_release_tag} | sed -e 's/\//_/')_linux_amd64.tar.gz" + wget -O "${kustomize_filename}" "https://github.com/kubernetes-sigs/kustomize/releases/download/${kustomize_release_tag}/${kustomize_filename}" + tar -xf "${kustomize_filename}" + install -o root -g root -m 0755 kustomize /usr/local/bin/kustomize +fi + +if [[ $(ls /usr/local/bin/clusterctl) == "" ]]; then + clusterctl_api="https://api.github.com/repos/kubernetes-sigs/cluster-api/releases" + clusterctl_release_tag="$(curl -sL "${clusterctl_api}" | jq -r ".[].tag_name" | head -n 1 )" + 
clusterctl_filename="clusterctl" + wget -O "${clusterctl_filename}" "https://github.com/kubernetes-sigs/cluster-api/releases/download/${clusterctl_release_tag}/clusterctl-linux-amd64" + install -o root -g root -m 0755 "${clusterctl_filename}" /usr/local/bin/"${clusterctl_filename}" +fi + +if [[ $(ls /usr/local/bin/yq) == "" ]]; then + api="https://api.github.com/repos/mikefarah/yq/releases" + release_tag="$(curl -sL "${api}" | jq -r ".[].tag_name" | head -n 1 )" + filename="yq" + wget -O "${filename}" "https://github.com/mikefarah/yq/releases/download/${release_tag}/yq_linux_amd64" + install -o root -g root -m 0755 "${filename}" /usr/local/bin/"${filename}" +fi + +# Cleanup +rm -rf "${helm_filename}" "${kustomize_filename}" "${clusterctl_filename}" linux-amd64 minikube-linux-amd64 kubectl kustomize