Commit

Merge pull request #904 from huanggze/logging
refactor observability components
pixiake authored Jul 22, 2020
2 parents 9ea76c9 + 6ad3934 commit f271a04
Showing 55 changed files with 366 additions and 728 deletions.
13 changes: 7 additions & 6 deletions deploy/cluster-configuration.yaml
@@ -22,6 +22,13 @@ spec:
etcdVolumeSize: 20Gi # etcd PVC size.
openldapVolumeSize: 2Gi # openldap PVC size.
redisVolumSize: 2Gi # Redis PVC size.
es: # Storage backend for logging, events and auditing.
elasticsearchMasterReplicas: 1 # total number of master nodes; an even number is not allowed
elasticsearchDataReplicas: 1 # total number of data nodes.
elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes.
elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes.
logMaxAge: 7 # Log retention time in the built-in Elasticsearch, 7 days by default.
elkPrefix: logstash # The string making up index names. The index name will be formatted as ks-<elk_prefix>-log.
console:
enableMultiLogin: false # enable/disable multiple sign-on; it allows one account to be used by several users at the same time.
port: 30880
@@ -44,13 +51,7 @@
replicas: 2
logging: # (CPU: 57 m, Memory: 2.76 G) Whether to install KubeSphere logging system. Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd.
enabled: false
elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
elasticsearchDataReplicas: 1 # total number of data nodes.
logsidecarReplicas: 2
elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes.
elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes.
logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
elkPrefix: logstash # The string making up index names. The index name will be formatted as ks-<elk_prefix>-log.
metrics_server: # (CPU: 56 m, Memory: 44.35 MiB) Whether to install metrics-server. It enables HPA (Horizontal Pod Autoscaler).
enabled: false
monitoring:
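
With the Elasticsearch settings moved under common.es, the logging block keeps only its own options; enabling logging in ClusterConfiguration now looks roughly like this (values taken from the diff above):

    logging:
      enabled: true
      logsidecarReplicas: 2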
9 changes: 9 additions & 0 deletions roles/common/defaults/main.yaml
@@ -6,6 +6,15 @@ common:
etcdVolumeSize: 20Gi
openldapVolumeSize: 2Gi
redisVolumSize: 2Gi
es:
elasticsearchMasterReplicas: 1
elasticsearchDataReplicas: 1
elasticsearchMasterVolumeSize: 4Gi
elasticsearchDataVolumeSize: 20Gi
logMaxAge: 7
elkPrefix: logstash
# externalElasticsearchUrl:
# externalElasticsearchPort:


op_minio_accesskey: openpitrixminioaccesskey
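
To point logging, events and auditing at an external Elasticsearch instead of the built-in one, the two commented keys above can presumably be filled in; the host and port below are placeholders:

    common:
      es:
        externalElasticsearchUrl: es.example.com   # placeholder host
        externalElasticsearchPort: 9200            # placeholder port

When both keys are set, es-install.yaml skips the built-in elasticsearch-logging deployment and the curator template renders them as the client host and port.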
12 changes: 11 additions & 1 deletion roles/common/tasks/common-install.yaml
@@ -27,4 +27,14 @@
- "import.stderr and 'is forbidden' not in import.stderr"
when:
- openpitrix.enabled or notification.enabled or alerting.enabled
- "status.etcd is not defined or status.etcd.status is not defined or status.etcd.status != 'enabled'"
- "status.etcd is not defined or status.etcd.status is not defined or status.etcd.status != 'enabled'"

- import_tasks: es-install.yaml
when:
- logging.enabled or events.enabled or auditing.enabled or servicemesh.enabled
- "status.es is not defined or status.es.status is not defined or status.es.status != 'enabled'"

- import_tasks: fluentbit-install.yaml
when:
- logging.enabled or events.enabled or auditing.enabled
- "status.fluentbit is not defined or status.fluentbit.status is not defined or status.fluentbit.status != 'enabled'"
125 changes: 125 additions & 0 deletions roles/common/tasks/es-install.yaml
@@ -0,0 +1,125 @@
- name: Kubesphere | Getting elasticsearch and curator installation files
copy:
src: "elasticsearch"
dest: "{{ kubesphere_dir }}/"


- name: Kubesphere | Creating custom manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kubesphere_dir }}/elasticsearch/{{ item.file }}"
with_items:
- { name: custom-values-elasticsearch, file: custom-values-elasticsearch.yaml }
- { name: custom-values-elasticsearch-curator, file: custom-values-elasticsearch-curator.yaml }


# Update check
- block:
- name: Kubesphere | Check elasticsearch data StatefulSet
shell: >
{{ bin_dir }}/kubectl get sts -n kubesphere-logging-system | grep "elasticsearch-logging-data" | wc -l
register: sts_number

- name: Kubesphere | Check elasticsearch storageclass
shell: >
{{ bin_dir }}/kubectl get sts -n kubesphere-logging-system elasticsearch-logging-data -o yaml | grep "storageClassName" | wc -l
register: sc_number

- name: Kubesphere | Comment elasticsearch storageclass parameter
shell: >
sed -i 's/\(^.*storageClass:.*$\)/#\1/' {{ kubesphere_dir }}/elasticsearch/custom-values-elasticsearch.yaml
when:
- sts_number.stdout != "0"
- sc_number.stdout == "0"
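# A sketch of the transformation above, on a hypothetical rendered line:
#   before:    storageClass: "local"
#   after:   #  storageClass: "local"
# Commenting the key keeps helm from trying to change storageClassName on an existing
# StatefulSet, whose volumeClaimTemplates are immutable once created.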


# Deploy elasticsearch
- block:
- name: Kubesphere | Deploy elasticsearch-logging
shell: >
{{ bin_dir }}/helm upgrade --install elasticsearch-logging
{{ kubesphere_dir }}/elasticsearch/elasticsearch-1.22.1.tgz
-f {{ kubesphere_dir }}/elasticsearch/custom-values-elasticsearch.yaml
--namespace kubesphere-logging-system
register: es_result
ignore_errors: True

- name: Kubesphere | Get PersistentVolume Name
shell: >
{{ bin_dir }}/kubectl get pv | grep "kubesphere-logging-system/data-elasticsearch-logging" | awk '{print $1}'
register: es_pv_name
when:
- "es_result.stderr and 'no matches for kind' in es_result.stderr"

- name: Kubesphere | Patch PersistentVolume (persistentVolumeReclaimPolicy)
shell: >
{{ bin_dir }}/kubectl patch pv {{ item }}
-p '{"spec":{"persistentVolumeReclaimPolicy": "Retain"}}'
loop: "{{ es_pv_name.stdout_lines }}"
when:
- "es_result.stderr and 'no matches for kind' in es_result.stderr"

- name: Kubesphere | Delete elasticsearch
shell: >
{{ bin_dir }}/helm del --purge elasticsearch-logging
when:
- "es_result.stderr and 'no matches for kind' in es_result.stderr"

- name: Kubesphere | Waiting for seconds
wait_for: timeout=20
when:
- "es_result.stderr and 'no matches for kind' in es_result.stderr"

- name: Kubesphere | Deploy elasticsearch-logging
shell: >
{{ bin_dir }}/helm upgrade --install elasticsearch-logging
{{ kubesphere_dir }}/elasticsearch/elasticsearch-1.22.1.tgz
-f {{ kubesphere_dir }}/elasticsearch/custom-values-elasticsearch.yaml
--namespace kubesphere-logging-system
register: es_result
when:
- "es_result.stderr and 'no matches for kind' in es_result.stderr"

- fail:
msg: "{{ es_result.stderr }}"
when:
- es_result.stderr is defined
- es_result.stderr != ""
- es_result.stderr.find("no matches for kind") == -1

when:
- common.es.externalElasticsearchUrl is not defined
- common.es.externalElasticsearchPort is not defined
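# Note: the recovery path above (patch PVs to Retain, helm del --purge, wait, reinstall)
# runs only when the first upgrade fails with "no matches for kind", i.e. the existing
# release was built against an older chart API; setting Retain preserves the data
# volumes across the delete and reinstall.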


# Deploy elasticsearch curator
- block:
- name: Kubesphere | Check elasticsearch-logging-curator
shell: >
{{ bin_dir }}/helm list elasticsearch-logging-curator
register: curator_check
ignore_errors: True


- name: Kubesphere | Deploy elasticsearch-logging-curator
shell: >
{{ bin_dir }}/helm upgrade --install elasticsearch-logging-curator
{{ kubesphere_dir }}/elasticsearch/elasticsearch-curator-1.3.3.tgz
-f {{ kubesphere_dir }}/elasticsearch/custom-values-elasticsearch-curator.yaml
--namespace kubesphere-logging-system
when:
- (curator_check.stdout.find("DEPLOYED") == -1) or (curator_check.stdout.find("5.5.4-0217") == -1)


# Update clusterconfig (cc) status
- name: Kubesphere | import es status
shell: >
{{ bin_dir }}/kubectl patch cc ks-installer
--type merge
-p '{"status": {"es": {"status": "enabled", "enabledTime": "{{ lookup('pipe','date +%Y-%m-%dT%H:%M:%S%Z') }}"}}}'
-n kubesphere-system
register: import
failed_when: "import.stderr and 'Warning' not in import.stderr"
until: import is succeeded
retries: 5
delay: 3
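
If the patch succeeds, the ClusterConfiguration carries a status stanza roughly like the following (timestamp invented for illustration):

    status:
      es:
        status: enabled
        enabledTime: 2020-07-22T08:00:00UTC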
76 changes: 76 additions & 0 deletions roles/common/tasks/fluentbit-install.yaml
@@ -0,0 +1,76 @@
- name: Kubesphere | Getting fluent bit operator installation files
copy:
src: "fluentbit-operator"
dest: "{{ kubesphere_dir }}/"


- name: Kubesphere | Creating custom manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kubesphere_dir }}/fluentbit-operator/{{ item.path }}/{{ item.file }}"
with_items:
- { path: fluentbit, file: custom-fluentbit-fluentBit.yaml }
- { path: init, file: custom-fluentbit-operator-deployment.yaml }
- { path: migrator, file: custom-migrator-job.yaml }


# Upgrade or install fluent bit operator
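# Upgrade path (taken only when the detected KubeSphere major version is "2"): back up
# the old fluent-bit custom resource, delete the old operator, CR, DaemonSet and CRD,
# apply the new CRDs and operator from ./init, run the ./migrator job to convert the
# old configuration, then apply the new setup from ./fluentbit.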
- name: Kubesphere | Checking kubesphere-version
shell: >
{{ bin_dir }}/kubectl describe configmap -n kubesphere-system ks-console-ae-config | grep "kubesphere:" | awk '{print $2}'
register: kubesphere_version
ignore_errors: True


- name: Kubesphere | Backup old fluentbit crd
shell: >
{{ bin_dir }}/kubectl get fluentbits.logging.kubesphere.io -n kubesphere-logging-system fluent-bit -o yaml > {{ kubesphere_dir }}/fluentbit-crd.yaml.bak
when:
- kubesphere_version.stdout.split('.')[0] == "2"


- name: Kubesphere | Deleting old fluentbit operator
shell: >
{{ bin_dir }}/kubectl delete {{ item.type }} -n kubesphere-logging-system {{ item.name }}
loop:
- { type: deploy, name: logging-fluentbit-operator }
- { type: fluentbits.logging.kubesphere.io, name: fluent-bit }
- { type: ds, name: fluent-bit }
- { type: crd, name: fluentbits.logging.kubesphere.io }
when:
- kubesphere_version.stdout.split('.')[0] == "2"


- name: Kubesphere | Prepare fluentbit operator setup
shell: >
{{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/fluentbit-operator/init


- name: Kubesphere | Migrate fluentbit operator old config
shell: >
{{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/fluentbit-operator/migrator
when:
- kubesphere_version.stdout.split('.')[0] == "2"


- name: Kubesphere | Deploy new fluentbit operator
shell: >
{{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/fluentbit-operator/fluentbit
register: import
until: import is succeeded
retries: 5
delay: 3


# Update clusterconfig (cc) status
- name: Kubesphere | import fluentbit status
shell: >
{{ bin_dir }}/kubectl patch cc ks-installer
--type merge
-p '{"status": {"fluentbit": {"status": "enabled", "enabledTime": "{{ lookup('pipe','date +%Y-%m-%dT%H:%M:%S%Z') }}"}}}'
-n kubesphere-system
register: import
failed_when: "import.stderr and 'Warning' not in import.stderr"
until: import is succeeded
retries: 5
delay: 3
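
A basic sanity check after the tasks above; the fluentbits resource name is assumed from the CRD handled during the upgrade path and may differ under the new operator:

    kubectl -n kubesphere-logging-system get deploy,ds
    kubectl -n kubesphere-logging-system get fluentbits.logging.kubesphere.io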
28 changes: 28 additions & 0 deletions roles/common/tasks/get_old_config.yaml
@@ -82,4 +82,32 @@
when:
- redis_ha_pvc.rc == 0
- redis_ha_pvc.stdout != ""
ignore_errors: True

- name: Kubesphere | Check es-master PersistentVolumeClaim
shell: >
{{ bin_dir }}/kubectl get pvc -n kubesphere-logging-system data-elasticsearch-logging-discovery-0 -o jsonpath='{.status.capacity.storage}'
register: es_master_pvc
ignore_errors: True

- name: Kubesphere | Setting es master pv size
set_fact:
es_master_pv_size: "{{ es_master_pvc.stdout }}"
when:
- es_master_pvc.rc == 0
- es_master_pvc.stdout != ""
ignore_errors: True

- name: Kubesphere | Check es data PersistentVolumeClaim
shell: >
{{ bin_dir }}/kubectl get pvc -n kubesphere-logging-system data-elasticsearch-logging-data-0 -o jsonpath='{.status.capacity.storage}'
register: es_data_pvc
ignore_errors: True

- name: Kubesphere | Setting es data pv size
set_fact:
es_data_pv_size: "{{ es_data_pvc.stdout }}"
when:
- es_data_pvc.rc == 0
- es_data_pvc.stdout != ""
ignore_errors: True
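
On a default install the jsonpath query above prints the bound capacity of the existing claim, e.g.:

    $ kubectl get pvc -n kubesphere-logging-system data-elasticsearch-logging-data-0 -o jsonpath='{.status.capacity.storage}'
    20Gi

The captured value is later preferred over common.es.elasticsearchDataVolumeSize when the Helm values are templated.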
@@ -48,7 +48,7 @@ configMaps:
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ logging.logMaxAge }}
unit_count: {{ common.es.logMaxAge }}
field:
stats_result:
epoch:
@@ -58,9 +58,9 @@
---
client:
hosts:
- {% if logging.externalElasticsearchUrl is defined %}{{ logging.externalElasticsearchUrl }}{% else %}elasticsearch-logging-data.kubesphere-logging-system.svc{% endif %}
- {% if common.es.externalElasticsearchUrl is defined %}{{ common.es.externalElasticsearchUrl }}{% else %}elasticsearch-logging-data.kubesphere-logging-system.svc{% endif %}

port: {% if logging.externalElasticsearchPort is defined %}{{ logging.externalElasticsearchPort }}{% else %}9200{% endif %}
port: {% if common.es.externalElasticsearchPort is defined %}{{ common.es.externalElasticsearchPort }}{% else %}9200{% endif %}

# url_prefix:
# use_ssl: True
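
With no external Elasticsearch configured, the client block above should render roughly as:

    client:
      hosts:
        - elasticsearch-logging-data.kubesphere-logging-system.svc
      port: 9200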
@@ -72,14 +72,14 @@ cluster:
master:
name: master
exposeHttp: false
replicas: {{ logging.elasticsearchMasterReplicas }}
replicas: {{ common.es.elasticsearchMasterReplicas }}
heapSize: "512m"
# additionalJavaOpts: "-XX:MaxRAM=512m"
persistence:
enabled: true
accessMode: ReadWriteOnce
name: data
size: {% if es_master_pv_size is defined %}{{ es_master_pv_size }}{% else %}{{ logging.elasticsearchMasterVolumeSize }}{% endif %}
size: {% if es_master_pv_size is defined %}{{ es_master_pv_size }}{% else %}{{ common.es.elasticsearchMasterVolumeSize }}{% endif %}

{% if persistence.storageClass is defined and persistence.storageClass != "" %}
storageClass: "{{ persistence.storageClass }}"
@@ -124,14 +124,14 @@ data:
serviceType: ClusterIP
loadBalancerIP: {}
loadBalancerSourceRanges: {}
replicas: {{ logging.elasticsearchDataReplicas }}
replicas: {{ common.es.elasticsearchDataReplicas }}
heapSize: "1536m"
# additionalJavaOpts: "-XX:MaxRAM=1536m"
persistence:
enabled: true
accessMode: ReadWriteOnce
name: data
size: {% if es_data_pv_size is defined %}{{ es_data_pv_size }}{% else %}{{ logging.elasticsearchDataVolumeSize }}{% endif %}
size: {% if es_data_pv_size is defined %}{{ es_data_pv_size }}{% else %}{{ common.es.elasticsearchDataVolumeSize }}{% endif %}

{% if persistence.storageClass is defined and persistence.storageClass != "" %}
storageClass: "{{ persistence.storageClass }}"
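
Using the defaults from roles/common/defaults/main.yaml and no pre-existing data PVC, the data section above should render roughly as:

    data:
      replicas: 1
      persistence:
        enabled: true
        accessMode: ReadWriteOnce
        name: data
        size: 20Gi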
16 changes: 16 additions & 0 deletions roles/ks-auditing/tasks/fluentbit-operator.yaml
@@ -0,0 +1,16 @@
- name: ks-auditing | Getting logging installation files
copy:
src: "fluentbit-operator"
dest: "{{ kubesphere_dir }}/"


- name: ks-auditing | Creating manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kubesphere_dir }}/fluentbit-operator/{{ item.file }}"
with_items:
- { name: custom-output-elasticsearch-auditing, file: custom-output-elasticsearch-auditing.yaml }

- name: ks-auditing | Apply fluentbit operator custom resources
shell: >
{{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/fluentbit-operator
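
A quick post-apply sanity check; the outputs resource name is an assumption inferred from the custom-output-elasticsearch-auditing manifest name, so adjust it to whatever CRDs the operator actually registers:

    kubectl api-resources --api-group=logging.kubesphere.io
    kubectl -n kubesphere-logging-system get outputs.logging.kubesphere.io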
3 changes: 3 additions & 0 deletions roles/ks-auditing/tasks/main.yaml
@@ -35,6 +35,9 @@
- (auditing_check.stdout.find("DEPLOYED") == -1) or (auditing_check.stdout.find("v0.1.0") == -1)


- import_tasks: fluentbit-operator.yaml


- name: ks-auditing | import ks-auditing status
shell: >
{{ bin_dir }}/kubectl patch cc ks-installer