Merge pull request #1578 from pixiake/release-3.1-upgrade
Support for automatic upgrade
pixiake authored Jun 30, 2021
2 parents 48fa0cf + 8240bd6 commit ae80360
Showing 9 changed files with 70 additions and 43 deletions.
34 changes: 30 additions & 4 deletions controller/installRunner.py
@@ -46,7 +46,7 @@
"name": "ks-installer",
"namespace": "kubesphere-system",
"labels": {
"version": "v3.1.0"
"version": "v3.1.1-rc.1"
},
},
}
@@ -465,6 +465,27 @@ def generate_new_cluster_configuration(api):
"enabled": True
}

if "logging" in cluster_configuration_spec and "logsidecarReplicas" in cluster_configuration_spec[
"logging"]:
upgrade_flag = True
if "enabled" in cluster_configuration_spec["logging"]:
if cluster_configuration_spec["logging"]["enabled"]:
cluster_configuration_spec["logging"] = {
"enabled": True,
"logsidecar": {
"enabled": True,
"replicas": 2
}
}
else:
cluster_configuration_spec["logging"] = {
"enabled": False,
"logsidecar": {
"enabled": False,
"replicas": 2
}
}

if "notification" in cluster_configuration_spec:
upgrade_flag = True
del cluster_configuration_spec['notification']
@@ -514,12 +535,17 @@ def generate_new_cluster_configuration(api):
},
}
del cluster_configuration_spec["networkpolicy"]

if "core" in cluster_configuration_status:
if ("version" in cluster_configuration_status["core"] and cluster_configuration_status["core"]["version"] !=
cluster_configuration["metadata"]["labels"]["version"]) or "version" not in cluster_configuration_status["core"]:
upgrade_flag = True

if upgrade_flag:
cluster_configuration["spec"] = cluster_configuration_spec
if "status" in old_cluster_configuration and "clusterId" in old_cluster_configuration[
"status"]:
if "clusterId" in cluster_configuration_status:
cluster_configuration["status"] = {
"clusterId": old_cluster_configuration["status"]["clusterId"]
"clusterId": cluster_configuration_status["clusterId"]
}
delete_cluster_configuration(api)
create_cluster_configuration(api, cluster_configuration)
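
For readers skimming the hunk above: the core of this change is rewriting the old logging block (flat logsidecarReplicas field) into the v3.1 layout and flagging the configuration for upgrade. A standalone sketch of that migration, illustrative only and not the installer code itself:

# Illustrative sketch of the logging migration above; field names follow the
# diff (logsidecarReplicas in the old layout, logsidecar.* in the new one)
# and the replica count is fixed at 2, as in the hunk.
def migrate_logging(spec):
    logging = spec.get("logging", {})
    if "logsidecarReplicas" not in logging:
        return False  # already in the new layout, nothing to migrate
    enabled = bool(logging.get("enabled", False))
    spec["logging"] = {
        "enabled": enabled,
        "logsidecar": {"enabled": enabled, "replicas": 2},
    }
    return True

old_spec = {"logging": {"enabled": True, "logsidecarReplicas": 2}}
print(migrate_logging(old_spec))  # True
print(old_spec["logging"])        # {'enabled': True, 'logsidecar': {'enabled': True, 'replicas': 2}}
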
11 changes: 11 additions & 0 deletions roles/check-result/tasks/main.yaml
@@ -22,6 +22,17 @@
shell: "{{ bin_dir }}/kubectl get svc ks-console -n kubesphere-system -o=jsonpath='{.spec.ports[0].nodePort}'"
register: ks_console_svc_port

- name: KubeSphere | Importing ks-core status
shell: >
{{ bin_dir }}/kubectl patch cc ks-installer
--type merge
-p '{"status": {"core": {"version": "{{ ks_version }}"}}}'
-n kubesphere-system
register: import
failed_when: "import.stderr and 'Warning' not in import.stderr"
until: import is succeeded
retries: 5
delay: 3

- name: KubeSphere | Creating info_file
template:
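
The new task writes the installed core version into the ks-installer ClusterConfiguration status, which installRunner.py compares on the next run to decide whether an upgrade is needed. A rough Python equivalent of that patch, assuming the CRD coordinates installer.kubesphere.io/v1alpha1 with plural clusterconfigurations (not shown in this diff):

# Rough equivalent of the Ansible patch task above, using the kubernetes
# Python client. The group/version/plural values are assumptions, not taken
# from this diff; the version string stands in for {{ ks_version }}.
from kubernetes import client, config

config.load_kube_config()
api = client.CustomObjectsApi()
api.patch_namespaced_custom_object(
    group="installer.kubesphere.io",
    version="v1alpha1",
    namespace="kubesphere-system",
    plural="clusterconfigurations",
    name="ks-installer",
    body={"status": {"core": {"version": "v3.1.1-rc.1"}}},
)
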
11 changes: 5 additions & 6 deletions roles/download/defaults/main.yml
@@ -35,7 +35,6 @@ download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube
image_arch: "{{host_architecture | default('amd64')}}"

# Versions
ks_version: 2.1.0
kube_version: v1.15.5
etcd_version: v3.2.18

@@ -59,7 +58,7 @@ base_library_repo: >-
{%- endif %}
ks_image_tag: "{{ dev_tag | default('release-3.1') }}"
ks_version: "{{ dev_tag | default('v3.1.1-rc.1') }}"

# Containers
# In some cases, we need a way to set --registry-mirror or --insecure-registry for docker,
@@ -72,17 +71,17 @@ ks_image_tag: "{{ dev_tag | default('release-3.1') }}"

#KubeSphere:
ks_apiserver_repo: "{{ base_repo }}{{ namespace_override | default('kubespheredev') }}/ks-apiserver"
ks_apiserver_tag: "{{ ks_image_tag }}"
ks_apiserver_tag: "{{ ks_version }}"
ks_controller_manager_repo: "{{ base_repo }}{{ namespace_override | default('kubespheredev') }}/ks-controller-manager"
ks_controller_manager_tag: "{{ ks_image_tag }}"
ks_controller_manager_tag: "{{ ks_version }}"
ks_update_repo: "{{ base_repo }}{{ namespace_override | default('kubesphere') }}/ks-upgrade"
ks_update_tag: v3.0.0
ks_devops_migration_repo: "{{ base_repo }}{{ namespace_override | default('kubesphere') }}/ks-devops"
ks_devops_migration_tag: "flyway-v3.0.0"
ks_alerting_migration_repo: "{{ base_repo }}{{ namespace_override | default('kubesphere') }}/ks-alerting-migration"
ks_alerting_migration_repo: "{{ base_repo }}{{ namespace_override | default('kubespheredev') }}/ks-alerting-migration"
ks_alerting_migration_tag: "v3.1.0"
ks_console_repo: "{{ base_repo }}{{ namespace_override | default('kubespheredev') }}/ks-console"
ks_console_tag: "{{ ks_image_tag }}"
ks_console_tag: "{{ ks_version }}"
ks_kubectl_repo: "{{ base_repo }}{{ namespace_override | default('kubesphere') }}/kubectl"
ks_kubectl_tag: v1.0.0
# kubectl versions
4 changes: 0 additions & 4 deletions roles/ks-core/ks-core/defaults/main.yaml
@@ -1,9 +1,5 @@
---

kubernetes_ca_crt: "{{ kube_cert_dir }}/ca.crt"
kubernetes_ca_key: "{{ kube_cert_dir }}/ca.key"

ks_version: v3.1.0
kube_version: v1.16.6
openpitrix_version: v0.3.5

5 changes: 0 additions & 5 deletions roles/ks-core/prepare/defaults/main.yaml
@@ -1,6 +1 @@
---

ks_version: v3.0.0

kubernetes_ca_crt: "{{ kube_cert_dir }}/ca.crt"
kubernetes_ca_key: "{{ kube_cert_dir }}/ca.key"
5 changes: 2 additions & 3 deletions roles/ks-monitor/tasks/main.yaml
@@ -17,11 +17,10 @@
until: import is succeeded
retries: 5
delay: 3
when:
- "status.monitoring is not defined or status.monitoring.status is not defined or status.monitoring.status != 'enabled'"

- import_tasks: thanosruler.yaml
when:
- alerting is defined
- alerting.enabled is defined
- alerting.enabled == true
- alerting.enabled == true
- "status.alerting is not defined or status.alerting.status is not defined or status.alerting.status != 'enabled'"
2 changes: 2 additions & 0 deletions roles/ks-monitor/tasks/prometheus-stack.yaml
@@ -40,6 +40,8 @@
- "status.monitoring is not defined or status.monitoring.status is not defined or status.monitoring.status != 'enabled'"

- import_tasks: notification-manager.yaml
when:
- "status.monitoring is not defined or status.monitoring.status is not defined or status.monitoring.status != 'enabled'"

- import_tasks: monitoring-dashboard.yaml
when:
14 changes: 13 additions & 1 deletion roles/ks-monitor/tasks/thanosruler.yaml
@@ -1,4 +1,16 @@
---
- name: Monitoring | Installing thanosruler
shell: >
{{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/prometheus/thanosruler
{{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/prometheus/thanosruler
- name: Monitoring | Importing alerting status
shell: >
{{ bin_dir }}/kubectl patch cc ks-installer
--type merge
-p '{"status": {"alerting": {"status": "enabled", "enabledTime": "{{ lookup('pipe','date +%Y-%m-%dT%H:%M:%S%Z') }}"}}}'
-n kubesphere-system
register: import
failed_when: "import.stderr and 'Warning' not in import.stderr"
until: import is succeeded
retries: 5
delay: 3
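
Besides applying the thanosruler manifests, the file now records when alerting was enabled. The enabledTime value comes from date +%Y-%m-%dT%H:%M:%S%Z; the equivalent timestamp in Python, for illustration only:

# Illustrative: the enabledTime format written by the alerting-status patch,
# matching the shell format string %Y-%m-%dT%H:%M:%S%Z.
import time
print(time.strftime("%Y-%m-%dT%H:%M:%S%Z"))  # e.g. 2021-06-30T08:15:00UTC
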
27 changes: 7 additions & 20 deletions roles/telemetry/tasks/main.yaml
@@ -68,47 +68,34 @@
register: number
ignore_errors: true

- name: Get clusterId by cm of kubesphere-config
- name: Get clusterId by cc of ks-installer
shell: >
{{ bin_dir }}/kubectl get cc ks-installer -n kubesphere-system -o yaml | grep "clusterId"
{{ bin_dir }}/kubectl get cc ks-installer -n kubesphere-system -o=jsonpath='{.status.clusterId}'
register: check
ignore_errors: true

- name: KubeSphere | Creating KubeSphere clusterId
shell: >
uuidgen -r -t
echo $(cat /proc/sys/kernel/random/uuid)-$(date '+%s')
register: cluster
when: check.stdout.find('clusterId') == -1
when: check.stdout == ""
ignore_errors: true

- name: clusterId patch to cm of kubesphere-config
- name: clusterId patch to cc of ks-installer
shell: >
{{ bin_dir }}/kubectl patch cc ks-installer -n kubesphere-system --type merge -p '{"status": {"clusterId": "{{ cluster[ 'stdout' ] }}"}}'
when: check.stdout.find('clusterId') == -1
when: check.stdout == ""
ignore_errors: true

- name: KubeSphere | KubeSphere clusterId
set_fact:
cluster_str: "{{ cluster.stdout}}"
when: check.stdout.find('clusterId') == -1

- name: Get clusterId
shell: >
{{ bin_dir }}/kubectl get cc ks-installer -n kubesphere-system -o=jsonpath='{.status.clusterId}'
register: cluster
when: check.stdout.find('clusterId') != -1
ignore_errors: true

- name: KubeSphere | KubeSphere clusterId
set_fact:
cluster_str: "{{ cluster.stdout}}"
when: check.stdout.find('clusterId') != -1

- name: clusterId add ks
shell: >
echo "{{ cluster_str }}"
register: clusterId
ignore_errors: true

- name: Get multicluster status
shell: >
@@ -118,7 +105,7 @@

- name: Get essential information
uri:
url: "https://kubesphere.io/log/?k8s={{ k8s[ 'stdout' ] }}&ks={{ ks[ 'stdout' ] }}&ha={{ ha[ 'stdout' ] }}&c={{ number[ 'stdout' ] }}&K8st={{ kubetime[ 'stdout' ] }}&Kst={{ kubespheretime[ 'stdout' ] }}&os={{ firstipos[ 'stdout' ] }}&mID={{ mID[ 'stdout' ] }}&clusterId={{ clusterId[ 'stdout' ] }}&mod={{ mod[ 'stdout' ] }}&multi={{ multi[ 'stdout' ] }}"
url: "https://kubesphere.io/log/?k8s={{ k8s[ 'stdout' ] }}&ks={{ ks[ 'stdout' ] }}&ha={{ ha[ 'stdout' ] }}&c={{ number[ 'stdout' ] }}&K8st={{ kubetime[ 'stdout' ] }}&Kst={{ kubespheretime[ 'stdout' ] }}&os={{ firstipos[ 'stdout' ] }}&mID={{ mID[ 'stdout' ] }}&clusterId={{ cluster_str }}&mod={{ mod[ 'stdout' ] }}&multi={{ multi[ 'stdout' ] }}"
method: GET
validate_certs: false
ignore_errors: true
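
Earlier in this file, the clusterId generation switches from uuidgen to concatenating a random UUID with the current Unix timestamp, and the value is now read from and written to the ks-installer cc status instead of the kubesphere-config ConfigMap. The resulting identifier format, sketched in Python for illustration:

# Illustrative: the clusterId format produced by
# echo $(cat /proc/sys/kernel/random/uuid)-$(date '+%s')
import time
import uuid

cluster_id = "{}-{}".format(uuid.uuid4(), int(time.time()))
print(cluster_id)  # e.g. 1b4e28ba-2fa1-11d2-883f-0016d3cca427-1625040000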
