Merge pull request #115 from dejo1307/april_0.4.0_fixes
April 0.4.0 fixes
sabrina-yee authored May 18, 2021
2 parents 44b83ec + efcf1d8 commit 10bc344
Showing 21 changed files with 203 additions and 46 deletions.
4 changes: 2 additions & 2 deletions roles/hcl/component-pack/tasks/setup_bootstrap.yml
@@ -56,8 +56,8 @@
   become_user: "{{ __sudo_user }}"

 - name: Wait for bootstrap to come up
-  pause:
-    seconds: 60
+  shell: kubectl wait --namespace {{ __default_namespace }} --for=condition=complete job/bootstrap --timeout=300s
+  become_user: "{{ __sudo_user }}"

 - name: Get bootstrap POD name
   shell: kubectl get pods -n {{ __default_namespace }} | grep bootstrap | grep Completed | awk {'print $1'}
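Note: kubectl wait blocks until the job reports the requested condition or the timeout expires (exiting non-zero), so the play now fails fast instead of sleeping a fixed 60 seconds. For comparison only, a minimal sketch of the same gate via the kubernetes.core collection; this assumes the collection and the Python kubernetes client are available on the target, which the role itself does not require:

- name: Wait for the bootstrap job to complete (illustrative sketch, not part of the role)
  kubernetes.core.k8s_info:
    kind: Job
    name: bootstrap
    namespace: "{{ __default_namespace }}"
    wait: yes
    wait_condition:
      type: Complete
      status: "True"
    wait_timeout: 300
  become_user: "{{ __sudo_user }}"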
6 changes: 3 additions & 3 deletions roles/hcl/component-pack/tasks/setup_customizer.yml
@@ -71,9 +71,9 @@
   command: "helm upgrade mw-proxy {{ customizer_tgz.files[0].path }} -i -f {{ __customizer_env }} --namespace {{ __default_namespace }}"
   become_user: "{{ __sudo_user }}"

-- name: Give it some space...
-  pause:
-    seconds: 30
+- name: Wait for mw-proxy to come up
+  shell: kubectl wait --namespace {{ __default_namespace }} --for=condition=ready pod --selector=name=mw-proxy --timeout=300s
+  become_user: "{{ __sudo_user }}"

 - name: Check if customizer is up and running
   shell: kubectl get pods -n {{ __default_namespace }} | grep -i mw-proxy | grep -i Running
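Note: the same selector-based wait replaces the remaining pause tasks in this PR (elasticsearch7, infrastructure, orientme, tailored-exp, teams), each with its own label selector, so one sketch covers them all. An equivalent module-based form, again assuming kubernetes.core is available, would look roughly like:

- name: Wait for mw-proxy pods to become Ready (illustrative sketch)
  kubernetes.core.k8s_info:
    kind: Pod
    namespace: "{{ __default_namespace }}"
    label_selectors:
      - name=mw-proxy
    wait: yes
    wait_condition:
      type: Ready
      status: "True"
    wait_timeout: 300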
6 changes: 3 additions & 3 deletions roles/hcl/component-pack/tasks/setup_elasticsearch7.yml
@@ -19,9 +19,9 @@
   command: "helm upgrade elasticsearch7 {{ elasticsearch7_tgz.files[0].path }} -i -f {{ __elasticsearch_env }} --namespace {{ __default_namespace }}"
   become_user: "{{ __sudo_user }}"

-- name: Give it some space...
-  pause:
-    seconds: 300
+- name: Wait for es-data-7-0 pod to become ready
+  shell: kubectl wait --namespace {{ __default_namespace }} --for=condition=ready pod --selector=statefulset.kubernetes.io/pod-name=es-data-7-0 --timeout=300s
+  become_user: "{{ __sudo_user }}"

 - name: Check if elasticsearch master is up and running
   shell: kubectl get pods -n {{ __default_namespace }} | grep -i es-master-7 | grep -i Running
4 changes: 2 additions & 2 deletions roles/hcl/component-pack/tasks/setup_infrastructure.yml
@@ -20,8 +20,8 @@
   become_user: "{{ __sudo_user }}"

 - name: Wait for infrastructure to come up
-  pause:
-    seconds: 300
+  shell: kubectl wait --namespace {{ __default_namespace }} --for=condition=ready pod --selector=app=mongo --timeout=300s
+  become_user: "{{ __sudo_user }}"

 - name: Check if appregistry-client is up and running
   shell: kubectl get pods -n {{ __default_namespace }} | grep -i appregistry-client | grep -i Running
4 changes: 0 additions & 4 deletions roles/hcl/component-pack/tasks/setup_kudosboards.yml
@@ -36,7 +36,3 @@
 - name: Setup kudosboards
   command: "helm upgrade kudos-boards-cp {{ kudosboards_tgz.files[0].path }} -i -f {{ __kudos_boards_destination }} --namespace {{ __default_namespace }} --recreate-pods"
   become_user: "{{ __sudo_user }}"
-
-- name: Wait for kudosboards to come up
-  pause:
-    seconds: 30
6 changes: 3 additions & 3 deletions roles/hcl/component-pack/tasks/setup_orientme.yml
@@ -19,9 +19,9 @@
   command: "helm upgrade orientme {{ orientme_tgz.files[0].path }} -i -f {{ __orientme_env }} --namespace {{ __default_namespace }}"
   become_user: "{{ __sudo_user }}"

-- name: Give it some space...
-  pause:
-    seconds: 300
+- name: Wait for people-migrate pod to become ready
+  shell: kubectl wait --namespace {{ __default_namespace }} --for=condition=ready pod --selector=app=people-migrate --timeout=300s
+  become_user: "{{ __sudo_user }}"

 - name: Check if orientme is up and running
   shell: kubectl get pods -n {{ __default_namespace }} | grep -i orient | grep -i Running
4 changes: 0 additions & 4 deletions roles/hcl/component-pack/tasks/setup_outlook_addin.yml
@@ -36,7 +36,3 @@
 - name: Setup connections-outlook-desktop
   command: "helm upgrade connections-outlook-desktop {{ outlook_tgz.files[0].path }} -i -f {{ __outlook_addin_destination }} --namespace {{ __default_namespace }}"
   become_user: "{{ __sudo_user }}"
-
-- name: Wait for connections-outlook-desktop to come up
-  pause:
-    seconds: 30
6 changes: 3 additions & 3 deletions roles/hcl/component-pack/tasks/setup_tailored_exp.yml
@@ -19,9 +19,9 @@
   command: "helm upgrade tailored-exp {{ tailored_exp_tgz.files[0].path }} -i -f {{ __tailored_exp_env }} --namespace {{ __default_namespace }}"
   become_user: "{{ __sudo_user }}"

-- name: Give it some space...
-  pause:
-    seconds: 60
+- name: Wait for te-creation-wizard to come up
+  shell: kubectl wait --namespace {{ __default_namespace }} --for=condition=ready pod --selector=name=te-creation-wizard --timeout=300s
+  become_user: "{{ __sudo_user }}"

 - name: Check if tailored experience admin portal is up and running
   shell: kubectl get pods -n {{ __default_namespace }} | grep -i admin-portal | grep -i Running
6 changes: 3 additions & 3 deletions roles/hcl/component-pack/tasks/setup_teams.yml
@@ -19,9 +19,9 @@
   command: "helm upgrade teams {{ teams_tgz.files[0].path }} -i -f {{ __teams_env }} --namespace {{ __default_namespace }}"
   become_user: "{{ __sudo_user }}"

-- name: Give it some space...
-  pause:
-    seconds: 60
+- name: Wait for teams to come up
+  shell: kubectl wait --namespace {{ __default_namespace }} --for=condition=ready pod --selector=name=teams-share-service --timeout=300s
+  become_user: "{{ __sudo_user }}"

 - name: Check if teams share service is up and running
   shell: kubectl get pods -n {{ __default_namespace }} | grep -i teams-share-service | grep -i Running
12 changes: 6 additions & 6 deletions roles/hcl/docs/tasks/get_cluster_names.yml
@@ -2,23 +2,23 @@
 - name: Set cluster names for small topology
   set_fact:
     __cnx_files_cluster: "{{ cnx_files_cluster if (cnx_files_cluster is defined) else 'Apps' }}"
-    __cnx_news_cluster: "{{ cnx_news_cluster if (cnx_files_cluster is defined) else 'Apps' }}"
-    __cnx_common_cluster: "{{ cnx_common_cluster if (cnx_files_cluster is defined) else 'Apps' }}"
+    __cnx_news_cluster: "{{ cnx_news_cluster if (cnx_news_cluster is defined) else 'Apps' }}"
+    __cnx_common_cluster: "{{ cnx_common_cluster if (cnx_common_cluster is defined) else 'Apps' }}"
   when:
     - __cnx_deploy_type == "small"

 - name: Set cluster names for medium topology
   set_fact:
     __cnx_files_cluster: "{{ cnx_files_cluster if (cnx_files_cluster is defined) else 'Apps' }}"
-    __cnx_news_cluster: "{{ cnx_news_cluster if (cnx_files_cluster is defined) else 'Infra' }}"
-    __cnx_common_cluster: "{{ cnx_common_cluster if (cnx_files_cluster is defined) else 'Infra' }}"
+    __cnx_news_cluster: "{{ cnx_news_cluster if (cnx_news_cluster is defined) else 'Infra' }}"
+    __cnx_common_cluster: "{{ cnx_common_cluster if (cnx_common_cluster is defined) else 'Infra' }}"
   when:
     - __cnx_deploy_type == "medium"

 - name: Set cluster names for large topology
   set_fact:
     __cnx_files_cluster: "{{ cnx_files_cluster if (cnx_files_cluster is defined) else 'Files' }}"
-    __cnx_news_cluster: "{{ cnx_news_cluster if (cnx_files_cluster is defined) else 'News' }}"
-    __cnx_common_cluster: "{{ cnx_common_cluster if (cnx_files_cluster is defined) else 'Common' }}"
+    __cnx_news_cluster: "{{ cnx_news_cluster if (cnx_news_cluster is defined) else 'News' }}"
+    __cnx_common_cluster: "{{ cnx_common_cluster if (cnx_common_cluster is defined) else 'Common' }}"
   when:
     - __cnx_deploy_type == "large"
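Note: the fix makes each override check its own variable instead of cnx_files_cluster. The same defaulting could be written more compactly with Jinja's default filter; a sketch only, not what the role actually does:

- name: Set cluster names for small topology (equivalent sketch using default())
  set_fact:
    __cnx_files_cluster: "{{ cnx_files_cluster | default('Apps') }}"
    __cnx_news_cluster: "{{ cnx_news_cluster | default('Apps') }}"
    __cnx_common_cluster: "{{ cnx_common_cluster | default('Apps') }}"
  when: __cnx_deploy_type == "small"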
2 changes: 1 addition & 1 deletion roles/hcl/docs/vars/main.yml
@@ -81,7 +81,7 @@ __shared_data_dir: "shared_data_dir="
 __shared_data_dir_r: "shared_data_dir={{ __docs_data_local_path }}"
 __editor_scope_name_r: "scope_name={{ __editor_cluster_name }}"

-__db_hostname_default: "{{ db_hostname | default( inventory_hostname ) }}"
+__db_hostname_default: "{{ db_hostname | default( hostvars[groups['db_servers'][0]]['inventory_hostname'] ) }}"
 __db_hostname: "db_hostname="
 __db_hostname_r: "db_hostname={{ __db_hostname_default }}"
 #__db_type is set by get_db_type.yml
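Note: the DB hostname default now resolves to the first host in the db_servers inventory group rather than whichever host the role runs on. A quick, purely illustrative way to inspect what the expression yields (assumes an inventory that defines a db_servers group):

- name: Show the resolved DB hostname default (illustrative)
  debug:
    msg: "{{ db_hostname | default( hostvars[groups['db_servers'][0]]['inventory_hostname'] ) }}"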
2 changes: 1 addition & 1 deletion roles/third_party/helm-install/vars/main.yml
@@ -1,4 +1,4 @@
 ---
-__helm_version: "{{ helm_version | default('3.5.2') }}"
+__helm_version: "{{ helm_version | default('3.5.4') }}"
 __helm_url: "https://get.helm.sh/helm-v{{ __helm_version }}-linux-amd64.tar.gz"
 __helm_install_dir: "{{ helm_install_dir | default('/opt/helm') }}"
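Note: __helm_url is derived from __helm_version, so with no override it now resolves to https://get.helm.sh/helm-v3.5.4-linux-amd64.tar.gz. Pinning a different release remains a one-line inventory override, for example (hypothetical value):

# group_vars/all.yml (hypothetical override)
helm_version: "3.5.3"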
@@ -1128,4 +1128,4 @@ RewriteCond %{REQUEST_METHOD} OPTIONS
 RewriteRule ^(.*)$ $1 [R=200,L]

 # Teams SameSite Fix
-Header edit Set-Cookie ^(.*)$ "$1; SameSite=None"
+# Header edit Set-Cookie ^(.*)$ "$1; SameSite=None"
@@ -0,0 +1,21 @@
---
- name: "Create install directory {{ __kube_binaries_install_dir }}"
file:
path: "{{ __kube_binaries_install_dir }}"
state: directory
mode: "0755"
become: true

- name: "Download kubectl {{ __kubernetes_version }} from {{ __kube_binaries_download_url }} to {{ __kube_binaries_install_dir }}"
get_url:
url: "{{ __kube_binaries_download_url }}/v{{ __kubernetes_version }}/bin/linux/amd64/kubectl"
dest: "{{ __kube_binaries_install_dir }}/kubectl-{{ __kubernetes_version }}"
mode: "0755"
become: true

- name: "Create kubectl symlink to {{ __kube_binaries_install_dir }}/kubectl-{{ __kubernetes_version }}"
file:
src: "{{ __kube_binaries_install_dir }}/kubectl-{{ __kubernetes_version }}"
dest: "{{ __kube_binaries_install_dir }}/kubectl"
state: link
become: true
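Note: the new task file downloads a versioned kubectl binary and points a stable kubectl symlink at it, so later version switches only move the symlink. A small, illustrative follow-up check (not part of the role) using the same variables:

- name: Verify the symlinked kubectl (illustrative)
  command: "{{ __kube_binaries_install_dir }}/kubectl version --client --short"
  register: kubectl_symlink_check
  changed_when: false

- debug:
    msg: "{{ kubectl_symlink_check.stdout }}"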
@@ -11,11 +11,21 @@
     state: present
     disable_excludes: kubernetes

+- name: Find kubectl
+  shell: "whereis kubectl | awk '{ print $2 }'"
+  register: kubectl_loc
+  ignore_errors: true
+
+- name: Remove kubectl so the desired version will be installed {{ kubectl_loc.stdout }}
+  file:
+    path: "{{ kubectl_loc.stdout }}"
+    state: absent
+  ignore_errors: true
+  when:
+    - kubectl_loc.rc == 0
+
 - name: Install Kubernetes binaries - kubectl
-  yum:
-    name: kubectl-{{ __kubernetes_version }}-0
-    state: present
-    disable_excludes: kubernetes
+  include_tasks: install_kubectl.yml

 - name: Add extra params for Kubelet
   template:
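Note: the yum-pinned kubectl install is replaced by the new install_kubectl.yml, after removing whatever kubectl the whereis lookup finds on the PATH. If whereis succeeds but prints nothing on a host without kubectl, the file task simply fails and is ignored; a slightly tighter alternative sketch (assuming a POSIX shell on the target) would also guard on the output:

- name: Find kubectl (alternative sketch)
  shell: command -v kubectl
  register: kubectl_loc
  failed_when: false
  changed_when: false

- name: Remove kubectl so the desired version will be installed
  file:
    path: "{{ kubectl_loc.stdout }}"
    state: absent
  when:
    - kubectl_loc.rc == 0
    - kubectl_loc.stdout | length > 0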
@@ -1,5 +1,5 @@
 ---
-__kubernetes_version: "{{ kubernetes_version | default('1.18.10') }}"
+__kubernetes_version: "{{ kubernetes_version | default('1.18.19') }}"

 __ansible_cache: "/tmp/k8s_ansible"

@@ -8,3 +8,6 @@ __kubernetes_yum_repo_location: "/etc/yum.repos.d/kubernetes.repo"

 __kubelet_systemd_template: "etc.sysconfig.kubelet.j2"
 __kubelet_systemd_location: "/etc/sysconfig/kubelet"
+
+__kube_binaries_install_dir: "{{ kube_binaries_install_dir | default('/usr/bin') }}"
+__kube_binaries_download_url: "{{ kube_binaries_download_url | default('https://storage.googleapis.com/kubernetes-release/release') }}"
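Note: these two variables presumably feed the new install_kubectl.yml and upgrade_kubectl.yml tasks; with the defaults, the download URL works out to https://storage.googleapis.com/kubernetes-release/release/v1.18.19/bin/linux/amd64/kubectl. Pointing at an internal mirror is an inventory-level override, for example (hypothetical host):

# group_vars/all.yml (hypothetical mirror)
kube_binaries_install_dir: "/usr/local/bin"
kube_binaries_download_url: "https://mirror.example.com/kubernetes-release/release"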
@@ -10,8 +10,18 @@
     state: present
     disable_excludes: all

-- name: Install Kubernetes binaries - kubectl
-  yum:
-    name: kubectl-{{ __kubernetes_version }}
-    state: present
-    disable_excludes: all
+- name: Find kubectl
+  shell: "whereis kubectl | awk '{ print $2 }'"
+  register: kubectl_loc
+  ignore_errors: true
+
+- name: Remove kubectl so the desired version will be installed {{ kubectl_loc.stdout }}
+  file:
+    path: "{{ kubectl_loc.stdout }}"
+    state: absent
+  ignore_errors: true
+  when:
+    - kubectl_loc.rc == 0
+
+- name: Upgrade Kubernetes binaries - kubectl
+  include_tasks: upgrade_kubectl.yml
@@ -0,0 +1,30 @@
---
- name: Run kubectl version
command: "kubectl version --client --short"
register: kubectl_version_output
ignore_errors: True

- name: Get kubectl version
set_fact:
kubectl_version: "{{ kubectl_version_output.stdout | regex_search('Client Version:\\sv(.*)', '\\1') | first }}"
when: kubectl_version_output.rc == 0

- set_fact:
kubectl_version: "0"
when: kubectl_version_output.rc != 0

- debug:
msg:
- "Current kubectl version = {{ kubectl_version }}, 0 means undetermined or not installed"
- "Target kubectl upgrade version = {{ __kubernetes_version }}"

- name: Delete old version if exists
file:
path: "{{ __kube_binaries_install_dir }}/kubectl"
state: absent
when: __kubernetes_version is version( kubectl_version, '>')

- name: Install new kubectl version
include_tasks:
file: ../../kubernetes-install/tasks/install_kubectl.yml
when: __kubernetes_version is version( kubectl_version, '>')
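Note: the upgrade flow parses the installed client version out of kubectl version --client --short and only replaces the binary when the target version is strictly newer. A tiny illustration of the extraction and comparison with hard-coded, hypothetical values:

- name: Illustrate the version extraction and comparison (hypothetical values)
  vars:
    sample_output: "Client Version: v1.18.10"
    sample_version: "{{ sample_output | regex_search('Client Version:\\sv(.*)', '\\1') | first }}"
  debug:
    msg: "1.18.19 is newer than {{ sample_version }}: {{ '1.18.19' is version(sample_version, '>') }}"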
3 changes: 3 additions & 0 deletions roles/third_party/kubernetes/kubernetes-upgrade/vars/main.yml
@@ -8,3 +8,6 @@ __kubeadm_config_location: "{{ __ansible_cache }}/kubeadm-config.yaml"

 __load_balancer_dns: "{{ load_balancer_dns | default('localhost') }}"
 __pod_subnet: "{{ pod_subnet | default('192.168.0.0/16') }}"
+
+__kube_binaries_install_dir: "{{ kube_binaries_install_dir | default('/usr/bin') }}"
+__kube_binaries_download_url: "{{ kube_binaries_download_url | default('https://storage.googleapis.com/kubernetes-release/release') }}"
@@ -0,0 +1,81 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v{{ __kubernetes_version }}
apiServer:
{% if enable_pod_security is defined %}
extraArgs:
enable-admission-plugins: PodSecurityPolicy
{% endif %}
{% if single_node_installation is not defined %}
certSANS:
- "{{ __load_balancer_dns }}"
controlPlaneEndpoint: "{{ __load_balancer_dns }}:6443"
etcd:
local:
extraArgs:
listen-client-urls: "https://127.0.0.1:2379,https://{{ ansible_default_ipv4.address }}:2379"
advertise-client-urls: "https://{{ ansible_default_ipv4.address }}:2379"
listen-peer-urls: "https://{{ ansible_default_ipv4.address }}:2380"
initial-advertise-peer-urls: "https://{{ ansible_default_ipv4.address }}:2380"
{% if inventory_hostname == hostvars[groups['k8s_masters'][0]]['inventory_hostname'] %}
initial-cluster: "{{ hostvars[groups['k8s_masters'][0]]['inventory_hostname'] }}=https://{{ hostvars[groups['k8s_masters'][0]]['ansible_default_ipv4']['address'] }}:2380"
{% elif inventory_hostname == hostvars[groups['k8s_masters'][1]]['inventory_hostname'] %}
initial-cluster: "{{ hostvars[groups['k8s_masters'][0]]['inventory_hostname'] }}=https://{{ hostvars[groups['k8s_masters'][0]]['ansible_default_ipv4']['address'] }}:2380,{{ hostvars[groups['k8s_masters'][1]]['inventory_hostname'] }}=https://{{ hostvars[groups['k8s_masters'][1]]['ansible_default_ipv4']['address'] }}:2380"
{% elif inventory_hostname == hostvars[groups['k8s_masters'][2]]['inventory_hostname'] %}
initial-cluster: "{{ hostvars[groups['k8s_masters'][0]]['inventory_hostname'] }}=https://{{ hostvars[groups['k8s_masters'][0]]['ansible_default_ipv4']['address'] }}:2380,{{ hostvars[groups['k8s_masters'][1]]['inventory_hostname'] }}=https://{{ hostvars[groups['k8s_masters'][1]]['ansible_default_ipv4']['address'] }}:2380,{{ hostvars[groups['k8s_masters'][2]]['inventory_hostname'] }}=https://{{ hostvars[groups['k8s_masters'][2]]['ansible_default_ipv4']['address'] }}:2380"
{% endif %}
{% if inventory_hostname != hostvars[groups['k8s_masters'][0]]['inventory_hostname'] %}
initial-cluster-state: existing
{% endif %}
serverCertSANs:
- {{ inventory_hostname }}
- {{ ansible_default_ipv4.address }}
peerCertSANs:
- {{ inventory_hostname }}
- {{ ansible_default_ipv4.address }}
{% endif %}
networking:
# This CIDR is a Calico default. Substitute or remove for your CNI provider.
podSubnet: "{{ __pod_subnet }}"
{% if inventory_hostname != hostvars[groups['k8s_masters'][0]]['inventory_hostname'] %}
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
kubernetesVersion: v{{ __kubernetes_version }}
apiServer:
{% if enable_pod_security is defined %}
extraArgs:
enable-admission-plugins: PodSecurityPolicy
{% endif %}
{% if single_node_installation is not defined %}
certSANS:
- "{{ __load_balancer_dns }}"
controlPlaneEndpoint: "{{ __load_balancer_dns }}:6443"
etcd:
local:
extraArgs:
listen-client-urls: "https://127.0.0.1:2379,https://{{ ansible_default_ipv4.address }}:2379"
advertise-client-urls: "https://{{ ansible_default_ipv4.address }}:2379"
listen-peer-urls: "https://{{ ansible_default_ipv4.address }}:2380"
initial-advertise-peer-urls: "https://{{ ansible_default_ipv4.address }}:2380"
{% if inventory_hostname == hostvars[groups['k8s_masters'][0]]['inventory_hostname'] %}
initial-cluster: "{{ hostvars[groups['k8s_masters'][0]]['inventory_hostname'] }}=https://{{ hostvars[groups['k8s_masters'][0]]['ansible_default_ipv4']['address'] }}:2380"
{% elif inventory_hostname == hostvars[groups['k8s_masters'][1]]['inventory_hostname'] %}
initial-cluster: "{{ hostvars[groups['k8s_masters'][0]]['inventory_hostname'] }}=https://{{ hostvars[groups['k8s_masters'][0]]['ansible_default_ipv4']['address'] }}:2380,{{ hostvars[groups['k8s_masters'][1]]['inventory_hostname'] }}=https://{{ hostvars[groups['k8s_masters'][1]]['ansible_default_ipv4']['address'] }}:2380"
{% elif inventory_hostname == hostvars[groups['k8s_masters'][2]]['inventory_hostname'] %}
initial-cluster: "{{ hostvars[groups['k8s_masters'][0]]['inventory_hostname'] }}=https://{{ hostvars[groups['k8s_masters'][0]]['ansible_default_ipv4']['address'] }}:2380,{{ hostvars[groups['k8s_masters'][1]]['inventory_hostname'] }}=https://{{ hostvars[groups['k8s_masters'][1]]['ansible_default_ipv4']['address'] }}:2380,{{ hostvars[groups['k8s_masters'][2]]['inventory_hostname'] }}=https://{{ hostvars[groups['k8s_masters'][2]]['ansible_default_ipv4']['address'] }}:2380"
{% endif %}
{% if inventory_hostname != hostvars[groups['k8s_masters'][0]]['inventory_hostname'] %}
initial-cluster-state: existing
{% endif %}
serverCertSANs:
- {{ inventory_hostname }}
- {{ ansible_default_ipv4.address }}
peerCertSANs:
- {{ inventory_hostname }}
- {{ ansible_default_ipv4.address }}
{% endif %}
networking:
# This CIDR is a Calico default. Substitute or remove for your CNI provider.
podSubnet: "{{ __pod_subnet }}"
{% endif %}
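Note: this new template emits a ClusterConfiguration (plus a second document on the additional control-plane nodes) with per-node etcd endpoints built from the k8s_masters inventory group. It is presumably rendered to __kubeadm_config_location and handed to kubeadm during the upgrade; a minimal usage sketch under that assumption (task names and the template filename are illustrative):

- name: Render the kubeadm config (illustrative sketch)
  template:
    src: kubeadm-config.yaml.j2
    dest: "{{ __kubeadm_config_location }}"

- name: Apply the upgrade on the first control-plane node (illustrative sketch)
  command: "kubeadm upgrade apply v{{ __kubernetes_version }} --config {{ __kubeadm_config_location }}"
  become: true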
7 changes: 7 additions & 0 deletions roles/third_party/linux/swap-setup/tasks/main.yml
@@ -1,3 +1,9 @@
+- name: Check if swap is already configured on the server
+  shell: /usr/bin/cat /proc/swaps | grep -v Filename
+  #changed_when: true
+  register: swap_is_already_configured
+  ignore_errors: true
+
 - name: Verify if swap is already configured by Ansible
   stat:
     path: "{{ __swapfile_success }}"

@@ -7,3 +13,4 @@
   include_tasks: configure_swap.yml
   when:
     - not swap_configured_by_ansible.stat.exists
+    - swap_is_already_configured.rc != 0
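Note: grep -v Filename strips the header line of /proc/swaps, so the command succeeds only when at least one swap device or file is listed, and the new when: condition skips configure_swap.yml on hosts that already have swap configured outside Ansible. An equivalent, illustrative guard using gathered facts would be:

- name: Configure swap only when none is present (illustrative sketch using facts)
  include_tasks: configure_swap.yml
  when:
    - not swap_configured_by_ansible.stat.exists
    - ansible_swaptotal_mb | default(0) | int == 0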
