diff --git a/stackIT/README.md b/stackIT/README.md new file mode 100644 index 000000000..a102f4cc4 --- /dev/null +++ b/stackIT/README.md @@ -0,0 +1,108 @@ +# StackIT Deployment and Configuration Guide + +This guide outlines the steps to set up and deploy Wire in a StackIT environment, including DNS configuration, Minikube cluster creation, Docker container setup, and Helm chart deployment. Each task and its associated commands are provided for clarity and customization. + +--- + +## Steps to Deploy + +### 1. Run the Ansible Playbook +- Prepare DNS records, StackIT public IP and set up Cert Manager to start before next step as mentioned [here](https://docs.wire.com/how-to/install/helm.html#how-to-set-up-dns-records). + - Check file `stackIT/host.ini` for host details + - Check file `stackIT/setting-values.sh` for DNS records i.e. TARGET_SYSTEM and CERT_MASTER_EMAIL + - We have used letsencrypt for example for cert management +- Use the following command to set up the VM: + ```bash + ansible-playbook -i stackIT/host.ini stackIT/stackit-vm-setup.yml --private-key ~/.ssh/stackit_private_key + ``` + +- **Optional Skips:** + The ansible playbook is segregated into multiple blocks. Use the following variables to control the flow of tasks: + ```bash + -e skip_install=true + -e skip_ssh=true + -e skip_minikube=true + -e skip_docker=true + -e skip_inventory=true + -e skip_download=true + -e skip_iptables=true + -e skip_disable_kubespray=true + ``` + +- **Artifacts and Tasks:** + - Minikube Kubernetes cluster and four Docker containers to support node requirements. + - Generate `hosts.ini` based on the IPs of above containers for further ansible operations on node + - Download wire-server-deploy artifacts based on the specified hash + - Configure iptables rules for DNAT to Coturn and k8s Nginx Controller (used by Wire applications). + +--- + +### 2. Commands to Run on the StackIT Nodes in directory `wire-server-deploy` + +1. 
**Load the environment:** + ```bash + source stackIT/offline-env.sh + ``` + +2. **Generate secrets:** + ```bash + bash -x bin/offline-secrets.sh + ``` + +3. **Access the environment:** + ```bash + d bash + ``` + +4. **Set up and configure the environment:** + Run the following to set up the AssetHost, loading containers for k8s cluster, sync time, cassandra, elasticsearch and minio: + ```bash + bash -x bin/offline-cluster.sh + ``` + +5. **Deploy Helm charts:** + Use the following script to set up Helm chart values and deploy them: + ```bash + bash -x stackIT/setting-values.sh + ``` + +--- + +## To-Dos + +1. **Modify `bin/offline-env.sh`:** + - Add definitions for Kubernetes config for kubectl for non-kubespray environments like in stackIT + - When copying the ssh env from the host, drop or re-initialize the known_hosts to avoid IP change clashes + +2. **Update `bin/offline-cluster.sh`:** + - Remove references to `restund.yml`. + - Introduce a check for Kubespray to avoid execution if Minikube is already running. + +3. **Enhance Helm charts:** + - Ensure pods reload when there are changes in: + - ConfigMaps. + - Environment variables. + - Public IPs parsed at pod startup. + - Introduce hashing to track changes and trigger restarts as needed. + - Current upgrades don't restart the pods, for example sftd and coturn + +--- + +### File Structure Highlights + +- **Ansible Playbook Files:** + - `stackIT/stackit-vm-setup.yml` + +- **Environment Scripts:** + - `stackIT/offline-env.sh` + - `../bin/offline-secrets.sh` + +- **Cluster and Helm Setup:** + - `../bin/offline-cluster.sh` + - `stackIT/setting-values.sh` + +--- + +## Notes +- Read all the files involved before executing them to understand defaults. 
+ diff --git a/stackIT/host.ini b/stackIT/host.ini new file mode 100644 index 000000000..2c071b1fc --- /dev/null +++ b/stackIT/host.ini @@ -0,0 +1 @@ +StackIT-public-ip ansible_ssh_common_args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' diff --git a/stackIT/offline-env.sh b/stackIT/offline-env.sh new file mode 100644 index 000000000..7e5e67dff --- /dev/null +++ b/stackIT/offline-env.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +ZAUTH_CONTAINER=$(sudo docker load -i "$SCRIPT_DIR"/../containers-adminhost/quay.io_wire_zauth_*.tar | awk '{print $3}') +export ZAUTH_CONTAINER + +WSD_CONTAINER=$(sudo docker load -i "$SCRIPT_DIR"/../containers-adminhost/container-wire-server-deploy.tgz | awk '{print $3}') + +alias d="sudo docker run -it --network=host \ + -v \${SSH_AUTH_SOCK:-nonexistent}:/ssh-agent \ + -e SSH_AUTH_SOCK=/ssh-agent \ + -v \$HOME/.ssh:/root/.ssh \ + -v \$PWD:/wire-server-deploy \ + -v /home/ubuntu/.kube:/root/.kube \ + -v /home/ubuntu/.minikube:/home/ubuntu/.minikube \ + -e KUBECONFIG=/root/.kube/config \ + \$WSD_CONTAINER" \ No newline at end of file diff --git a/stackIT/setting-values.sh b/stackIT/setting-values.sh new file mode 100644 index 000000000..6e9b1d58e --- /dev/null +++ b/stackIT/setting-values.sh @@ -0,0 +1,193 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2087 +set -Eeuo pipefail + +BASE_DIR="/wire-server-deploy" +TARGET_SYSTEM="mydomain.com" +CERT_MASTER_EMAIL="certmaster@mydomain.com" +# this IP should match the DNS A record for TARGET_SYSTEM +HOST_IP=$(wget -qO- https://api.ipify.org) +SFT_NODE="minikube" +COTURN_NODE="minikube-m03" +COTURN_NODE_IP=$(kubectl get node $COTURN_NODE -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}') +CHART_URL="https://charts.jetstack.io/charts/cert-manager-v1.13.2.tgz" +NGINX_K8S_NODE="minikube-m02" + +# it create the values.yaml from prod-values.example.yaml/example.yaml to values.yaml +process_charts() { + + 
# values for cassandra-external, elasticsearch-external, minio-external are created from offline-cluster.sh - helm_external.yml + # List of Helm charts to process values are here: + charts=( + fake-aws demo-smtp + rabbitmq databases-ephemeral reaper wire-server webapp account-pages + team-settings smallstep-accomp cert-manager-ns + nginx-ingress-services sftd coturn + ) + + for chart in "${charts[@]}"; do + chart_dir="$BASE_DIR/values/$chart" + + if [[ -d "$chart_dir" ]]; then + if [[ -f "$chart_dir/prod-values.example.yaml" ]]; then + if [[ ! -f "$chart_dir/values.yaml" ]]; then + cp "$chart_dir/prod-values.example.yaml" "$chart_dir/values.yaml" + echo "Used template prod-values.example.yaml to create $chart_dir/values.yaml" + fi + fi + fi + + done + + # some manual secrets + if [[ ! -f "$BASE_DIR/values/rabbitmq/secrets.yaml" ]]; then + cp "$BASE_DIR/values/rabbitmq/prod-secrets.example.yaml" "$BASE_DIR/values/rabbitmq/secrets.yaml" + echo "Used template prod-secrets.example.yaml to create $BASE_DIR/values/rabbitmq/secrets.yaml" + fi + if [[ ! -f "$BASE_DIR/values/team-settings/secrets.yaml" ]]; then + cp "$BASE_DIR/values/team-settings/prod-secrets.example.yaml" "$BASE_DIR/values/team-settings/secrets.yaml" + echo "Used template prod-secrets.example.yaml to create $BASE_DIR/values/team-settings/secrets.yaml" + fi + if [[ ! 
-f "$BASE_DIR/values/ingress-nginx-controller/values.yaml" ]]; then + cp "$BASE_DIR/values/ingress-nginx-controller/hetzner-ci.example.yaml" "$BASE_DIR/values/ingress-nginx-controller/values.yaml" + echo "Used template hetzner-ci.example.yaml to create $BASE_DIR/values/ingress-nginx-controller/values.yaml" + fi +} + +process_values() { + TEMP_DIR=$(mktemp -d) + trap 'rm -rf $TEMP_DIR' EXIT + + # Fixing the hosts with TARGET_SYSTEM and setting the turn server + sed -e "s/example.com/$TARGET_SYSTEM/g" \ + -e "s/# - \"turn::80\"/- \"turn:$HOST_IP:3478\"/g" \ + -e "s/# - \"turn::80?transport=tcp\"/- \"turn:$HOST_IP:3478?transport=tcp\"/g" \ + "$BASE_DIR/values/wire-server/values.yaml" > "$TEMP_DIR/wire-server-values.yaml" + + # Fixing the hosts in webapp team-settings and account-pages charts + for chart in webapp team-settings account-pages; do + sed "s/example.com/$TARGET_SYSTEM/g" "$BASE_DIR/values/$chart/values.yaml" > "$TEMP_DIR/$chart-values.yaml" + done + + # Setting certManager and DNS records + sed -e 's/useCertManager: false/useCertManager: true/g' \ + -e "/certmasterEmail:$/s/certmasterEmail:/certmasterEmail: $CERT_MASTER_EMAIL/" \ + -e "s/example.com/$TARGET_SYSTEM/" \ + "$BASE_DIR/values/nginx-ingress-services/values.yaml" > "$TEMP_DIR/nginx-ingress-services-values.yaml" + + # adding nodeSelector for ingress controller as it should run as Deployment in the k8s cluster i.e. lack of external load balancer + sed -e 's/kind: DaemonSet/kind: Deployment/' \ + "$BASE_DIR/values/ingress-nginx-controller/values.yaml" > "$TEMP_DIR/ingress-nginx-controller-values.yaml" + if ! 
grep -q "kubernetes.io/hostname: $NGINX_K8S_NODE" "$TEMP_DIR/ingress-nginx-controller-values.yaml"; then + echo -e " nodeSelector:\n kubernetes.io/hostname: $NGINX_K8S_NODE" >> "$TEMP_DIR/ingress-nginx-controller-values.yaml" + fi + + # Fixing SFTD hosts and setting the cert-manager to http01 and setting the replicaCount to 1 + sed -e "s/webapp.example.com/webapp.$TARGET_SYSTEM/" \ + -e "s/sftd.example.com/sftd.$TARGET_SYSTEM/" \ + -e 's/name: letsencrypt-prod/name: letsencrypt-http01/' \ + -e "s/replicaCount: 3/replicaCount: 1/" \ + "$BASE_DIR/values/sftd/values.yaml" > "$TEMP_DIR/sftd-values.yaml" + + # Creating coturn values and secrets + ZREST_SECRET=$(grep -A1 turn "$BASE_DIR/values/wire-server/secrets.yaml" | grep secret | tr -d '"' | awk '{print $NF}') + cat >"$TEMP_DIR/coturn-secrets.yaml"<"$TEMP_DIR/coturn-values.yaml"< /etc/apt/sources.list.d/docker.list + args: + executable: /bin/bash + + - name: Update apt package index + apt: + update_cache: yes + + - name: Install Docker packages + apt: + name: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin + state: present + + - name: Add ubuntu user to the docker group + user: + name: "{{ ansible_user }}" # Replace with the username you want to modify + groups: docker + append: yes + + - name: Enable and start Docker service + systemd: + name: docker + enabled: yes + state: started + + - name: Reset SSH connection to apply docker group membership changes + meta: reset_connection + + - name: Install Minikube + get_url: + url: "https://github.com/kubernetes/minikube/releases/latest/download/minikube-linux-amd64" + dest: /usr/local/bin/minikube + mode: '0755' + + - name: Install kubectl + get_url: + url: "https://dl.k8s.io/release/{{ kubernetes_version }}/bin/linux/amd64/kubectl" + dest: /usr/local/bin/kubectl + mode: '0755' + + when: skip_install | default(false) == false + + - name: Creating ssh key and storing it + # storing creds in the {{ ansible_user }} user's 
home directory + become: yes + become_user: "{{ ansible_user }}" + block: + - name: Ensure the .ssh directory exists + file: + path: "/home/{{ ansible_user }}/.ssh" + state: directory + mode: '0700' + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + + - name: Generate SSH key if it does not exist + shell: | + if [ ! -f "/home/{{ ansible_user }}/.ssh/id_rsa" ]; then + ssh-keygen -t rsa -b 4096 -f "/home/{{ ansible_user }}/.ssh/id_rsa" -N "" -C "ansible-generated-key"; + fi + args: + creates: "/home/{{ ansible_user }}/.ssh/id_rsa" + + - name: Read the public key content + slurp: + src: "/home/{{ ansible_user }}/.ssh/id_rsa.pub" + register: ssh_key_content + + - name: Set the public key as a fact + set_fact: + ssh_public_key: "{{ ssh_key_content['content'] | b64decode }}" + + when: skip_ssh | default(false) == false + + - name: start k8s(minikube) cluster + become: yes + become_user: "{{ ansible_user }}" + block: + - name: Check if Minikube is running + shell: minikube status + register: minikube_status + failed_when: false + changed_when: false + + - name: Start Minikube with specified configurations + shell: | + minikube start \ + --nodes={{ minikube_nodes }} \ + --cpus={{ minikube_cpus }} \ + --memory={{ minikube_memory }} \ + --disk-size={{ minikube_disk_size }} \ + --kubernetes-version="{{ kubernetes_version }}" \ + --container-runtime="{{ container_runtime }}" \ + --driver=docker \ + --extra-config=kubeadm.pod-network-cidr={{ pod_network_cidr }} + when: "'Running' not in minikube_status.stdout" + + - name: Retrieve node names from the cluster + shell: kubectl get nodes -o json | jq -r '.items[].metadata.name' + register: kube_node_names + + - name: Configure Node labels + shell: | + kubectl label node {{ item.1 }} wire.io/node={{ item.0 }} + loop: "{{ k8s_node_names | zip(kube_node_names.stdout_lines) | list }}" + register: label_output + + - name: Get list of running Minikube nodes + shell: minikube node list | awk '{print $1}' + register: 
minikube_nodes_raw + + - name: Add SSH key to all Minikube nodes + shell: | + minikube ssh --native-ssh=false -n {{ item }} -- "mkdir -p ~/.ssh && echo '{{ ssh_public_key }}' >> ~/.ssh/authorized_keys && chmod 600 ~/.ssh/authorized_keys" + args: + executable: /bin/bash + with_items: "{{ minikube_nodes_raw.stdout_lines }}" + async: 30 + poll: 5 + + when: skip_minikube | default(false) == false + + - name: Start Container Nodes + become: yes + become_user: "{{ ansible_user }}" + block: + - name: Pull the base Ubuntu image + docker_image: + name: ubuntu:22.04 + source: pull + + - name: Write public key to a file + copy: + dest: /tmp/wire-deploy/id_rsa.pub + content: "{{ ssh_public_key }}" + + - name: Create Dockerfile + copy: + dest: /tmp/wire-deploy/Dockerfile + content: | + FROM ubuntu:22.04 + RUN apt update && apt install -y openssh-server systemd systemd-sysv cron && mkdir /var/run/sshd + RUN systemctl enable ssh + RUN systemctl enable cron + RUN echo "PermitRootLogin yes" >> /etc/ssh/sshd_config + RUN mkdir -p /root/.ssh + COPY id_rsa.pub /root/.ssh/authorized_keys + RUN chmod 600 /root/.ssh/authorized_keys + EXPOSE 22 + STOPSIGNAL SIGRTMIN+3 + CMD ["/sbin/init"] + + - name: Build the Docker image + shell: | + docker build --no-cache -t {{ image_name }} /tmp/wire-deploy + + - name: Create and start containers + docker_container: + name: "{{ item }}" + image: "{{ image_name }}" + state: started + restart_policy: always + hostname: "{{ item }}" + privileged: yes + network_mode: "{{ docker_network_name }}" + env: + container: "docker" + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: "host" + tmpfs: + - /run + - /run/lock + security_opts: + - seccomp=unconfined + - apparmor=unconfined + loop: "{{ container_node_names }}" + + when: skip_docker | default(false) == false + + - name: Generate hosts.ini with dynamic IPs + become: yes + become_user: "{{ ansible_user }}" + block: + + - name: Display running containers + shell: docker ps + register: 
docker_ps_output + + - name: Print Docker container information + debug: + var: docker_ps_output.stdout + + - name: Extract IPs of Minikube nodes + shell: | + kubectl get nodes -o json | jq -r '.items[].status.addresses[] | select(.type=="InternalIP").address' + register: kube_ips + + - name: Store Minikube node IPs as variable + set_fact: + kubernetes_node_ips: "{{ kube_ips.stdout_lines }}" + + - name: Extract IPs of Docker containers + shell: | + docker inspect -f '{{ "{{ range.NetworkSettings.Networks }}{{ .IPAddress }}{{ end }}" }}' {{ item }} + loop: "{{ container_node_names }}" + register: docker_ips + + - name: Store Docker container IPs as variable + set_fact: + docker_container_ips: "{{ docker_ips.results | map(attribute='stdout') }}" + + - name: Display Kubernetes node IPs + debug: + msg: "Kubernetes Node IPs: {{ kubernetes_node_ips }}" + + - name: Display Docker container IPs + debug: + msg: "Docker Container IPs: {{ docker_container_ips }}" + + - name: Create dictionary for Kubernetes nodes and container IPs + set_fact: + host_ips: + kubenode1: "{{ kubernetes_node_ips[0] }}" + kubenode2: "{{ kubernetes_node_ips[1] }}" + kubenode3: "{{ kubernetes_node_ips[2] }}" + assethost: "{{ docker_container_ips[0] }}" + ansnode1: "{{ docker_container_ips[1] }}" + ansnode2: "{{ docker_container_ips[2] }}" + ansnode3: "{{ docker_container_ips[3] }}" + + - name: Generate hosts.ini content + set_fact: + hosts_ini_content: | + [all] + kubenode1 ansible_host={{ host_ips.kubenode1 }} ansible_user=docker + kubenode2 ansible_host={{ host_ips.kubenode2 }} ansible_user=docker + kubenode3 ansible_host={{ host_ips.kubenode3 }} ansible_user=docker + assethost ansible_host={{ host_ips.assethost }} ansible_user=root + ansnode1 ansible_host={{ host_ips.ansnode1 }} ansible_user=root + ansnode2 ansible_host={{ host_ips.ansnode2 }} ansible_user=root + ansnode3 ansible_host={{ host_ips.ansnode3 }} ansible_user=root + + [all:vars] + ansible_ssh_common_args = '-o 
UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' + + [cassandra:vars] + cassandra_network_interface = eth0 + cassandra_backup_enabled = False + cassandra_incremental_backup_enabled = False + + [elasticsearch:vars] + elasticsearch_network_interface = eth0 + + [minio:vars] + minio_network_interface = eth0 + prefix = "" + domain = "kittensonfire.com" + deeplink_title = "wire demo environment, kittensonfire.com" + + [rmq-cluster:vars] + rabbitmq_network_interface = eth0 + + [kube-master] + kubenode1 + kubenode2 + kubenode3 + + [etcd] + kubenode1 etcd_member_name=etcd1 + kubenode2 etcd_member_name=etcd2 + kubenode3 etcd_member_name=etcd3 + + [kube-node] + kubenode1 + kubenode2 + kubenode3 + + [k8s-cluster:children] + kube-master + kube-node + + [cassandra] + ansnode1 + ansnode2 + ansnode3 + + [cassandra_seed] + ansnode1 + + [elasticsearch] + ansnode1 + ansnode2 + ansnode3 + + [elasticsearch_master:children] + elasticsearch + + [minio] + ansnode1 + ansnode2 + ansnode3 + + [rmq-cluster] + ansnode1 + ansnode2 + ansnode3 + + when: skip_inventory | default(false) == false + + - name: Download wire artifact + become: yes + become_user: "{{ ansible_user }}" + block: + - name: create wire-server-deploy directory for {{ ansible_user }} user + file: + path: /home/{{ ansible_user }}/wire-server-deploy + state: directory + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: 0775 + + - name: check if wire-server-deploy-static-{{ artifact_hash }}.tgz exists + stat: + path: /home/{{ ansible_user }}/wire-server-deploy-static-{{ artifact_hash }}.tgz + get_checksum: False + + register: artifact_archive_file_check + - name: download wire-server-deploy archive + shell: + cmd: curl -fsSLo /home/{{ ansible_user }}/wire-server-deploy-static-{{ artifact_hash }}.tgz https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-{{ artifact_hash }}.tgz + creates: /home/{{ ansible_user }}/wire-server-deploy-static-{{ artifact_hash }}.tgz + when: not 
artifact_archive_file_check.stat.exists + + - name: check if wire-server-deploy folder contents exist + stat: + path: /home/{{ ansible_user }}/wire-server-deploy/containers-helm.tar + get_checksum: False + register: artifact_folder_content_check + + - name: unpack wire-server-deploy archive + unarchive: + src: /home/{{ ansible_user }}/wire-server-deploy-static-{{ artifact_hash }}.tgz + dest: /home/{{ ansible_user }}/wire-server-deploy + remote_src: yes + + when: not artifact_folder_content_check.stat.exists + - name: set permissions inside wire-server-deploy via shell command (fails when using ansible directive) + shell: + cmd: sudo chmod -R 0775 /home/{{ ansible_user }}/wire-server-deploy; sudo chown -R {{ ansible_user }}:{{ ansible_user }} /home/{{ ansible_user }} + + - name: Write updated hosts.ini to file + copy: + dest: /home/{{ ansible_user }}/wire-server-deploy/ansible/inventory/offline/hosts.ini + content: "{{ hosts_ini_content }}" + + when: skip_download | default(false) == false + + - name: Delete /tmp/wire-deploy directory with contents + file: + path: /tmp/wire-deploy + state: absent + + - name: Configure iptables rules + become: yes + block: + - name: Get the default interface for the default route + shell: ip route | awk '/default/ {print $5}' | head -n 1 + register: default_interface + changed_when: false + + - name: Get the IP address of the default interface + shell: ip -4 addr show dev {{ default_interface.stdout }} | awk '/inet / {print $2}' | cut -d/ -f1 + register: default_interface_ip + changed_when: false + + - name: Configure DNAT rules to send http/https traffic to the k8s ingress controller + iptables: + table: nat + chain: PREROUTING + protocol: "{{ item.protocol }}" + jump: DNAT + in_interface: "{{ default_interface.stdout }}" + destination: "{{ default_interface_ip.stdout }}" + destination_port: "{{ item.port }}" + to_destination: "{{ k8s_ingress_controller_ip }}:{{ item.to_port }}" + state: present + action: insert + loop: "{{ 
http_dnat_rules }}" + loop_control: + label: "Setting DNAT rule for port {{ item.port }} -> {{ k8s_ingress_controller_ip }}:{{ item.to_port }}" + + - name: Get the {{ docker_network_name }} Docker network ID + shell: | + docker network inspect {{ docker_network_name }} | jq -r '.[0].Id' + register: docker_network_id + changed_when: false + + - name: Get all interfaces with bridge interfaces + shell: ip -o addr show | awk '{print $2}' | grep -i 'br-' + register: bridge_interfaces + changed_when: false + + - name: Find the matching bridge interface for {{ docker_network_name }} Docker network + shell: | + for iface in {{ bridge_interfaces.stdout_lines | join(' ') }}; do + iface_id=$(echo "$iface" | cut -d '-' -f2) + if echo "{{ docker_network_id.stdout }}" | grep -q "$iface_id"; then + echo "$iface" + break + fi + done + register: matching_bridge_interface + changed_when: false + + - name: Ensure FORWARD rule for traffic from main interface to ingress controller + iptables: + table: filter + chain: FORWARD + in_interface: "{{ default_interface.stdout }}" + out_interface: "{{ matching_bridge_interface.stdout }}" + jump: ACCEPT + state: present + action: insert + + - name: Ensure FORWARD rule for traffic from ingress controller to main interface + iptables: + table: filter + chain: FORWARD + in_interface: "{{ matching_bridge_interface.stdout }}" + out_interface: "{{ default_interface.stdout }}" + jump: ACCEPT + state: present + action: insert + + - name: Configure DNAT rule to send UDP traffic for coturn to coturn server on k8s node + iptables: + table: nat + chain: PREROUTING + protocol: udp + jump: DNAT + destination: "{{ default_interface_ip.stdout }}" + destination_ports: "32768:61000" + in_interface: "{{ default_interface.stdout }}" + to_destination: "{{ coturn_k8s_node_ip }}" + state: present + action: insert + + - name: Configure DNAT rules to reach turn servers running on k8s node + iptables: + table: nat + chain: PREROUTING + protocol: "{{ item.protocol }}" + 
jump: DNAT + in_interface: "{{ default_interface.stdout }}" + destination: "{{ default_interface_ip.stdout }}" + destination_port: "{{ item.port }}" + to_destination: "{{ coturn_k8s_node_ip }}:{{ item.to_port }}" + state: present + action: insert + loop: "{{ turn_dnat_rules }}" + loop_control: + label: "Setting DNAT rule for port {{ item.port }} -> {{ coturn_k8s_node_ip }}:{{ item.to_port }}" + + - name: Ensure /etc/iptables directory exists + ansible.builtin.file: + path: /etc/iptables + state: directory + owner: root + group: root + mode: '0755' + + - name: Save iptables rules + shell: iptables-save -f /etc/iptables/rules.v4 + + when: skip_iptables | default(false) == false + + - name: disabling kubespray in offline-cluster.sh + become: yes + become_user: "{{ ansible_user }}" + block: + - name: Comment specific lines in offline-cluster.sh + ansible.builtin.lineinfile: + path: /home/{{ ansible_user }}/wire-server-deploy/bin/offline-cluster.sh + regexp: '^ansible-playbook -i \$INVENTORY_FILE \$ANSIBLE_DIR/kubernetes.yml --tags bastion,bootstrap-os,preinstall,container-engine' + line: '# ansible-playbook -i $INVENTORY_FILE $ANSIBLE_DIR/kubernetes.yml --tags bastion,bootstrap-os,preinstall,container-engine' + state: present + + - name: Comment another specific line in offline-cluster.sh + ansible.builtin.lineinfile: + path: /home/{{ ansible_user }}/wire-server-deploy/bin/offline-cluster.sh + regexp: '^ansible-playbook -i \$INVENTORY_FILE \$ANSIBLE_DIR/kubernetes.yml --skip-tags bootstrap-os,preinstall,container-engine,multus' + line: '# ansible-playbook -i $INVENTORY_FILE $ANSIBLE_DIR/kubernetes.yml --skip-tags bootstrap-os,preinstall,container-engine,multus' + state: present + + when: skip_disable_kubespray | default(false) == false