diff --git a/deploy/cluster-configuration.yaml b/deploy/cluster-configuration.yaml index 30062d8f9..cf41faf02 100644 --- a/deploy/cluster-configuration.yaml +++ b/deploy/cluster-configuration.yaml @@ -22,6 +22,13 @@ spec: etcdVolumeSize: 20Gi # etcd PVC size. openldapVolumeSize: 2Gi # openldap PVC size. redisVolumSize: 2Gi # Redis PVC size. + es: # Storage backend for logging, events and auditing. + elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number + elasticsearchDataReplicas: 1 # total number of data nodes. + elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes. + elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes. + logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. console: enableMultiLogin: false # enable/disable multiple sing on, it allows an account can be used by different users at the same time. port: 30880 @@ -44,13 +51,7 @@ spec: replicas: 2 logging: # (CPU: 57 m, Memory: 2.76 G) Whether to install KubeSphere logging system. Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd. enabled: false - elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number - elasticsearchDataReplicas: 1 # total number of data nodes. logsidecarReplicas: 2 - elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes. - elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes. - logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default. - elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. 
metrics_server: # (CPU: 56 m, Memory: 44.35 MiB) Whether to install metrics-server. IT enables HPA (Horizontal Pod Autoscaler). enabled: false monitoring: diff --git a/roles/common/defaults/main.yaml b/roles/common/defaults/main.yaml index 9c91cf156..c9fd4dabc 100644 --- a/roles/common/defaults/main.yaml +++ b/roles/common/defaults/main.yaml @@ -6,6 +6,15 @@ common: etcdVolumeSize: 20Gi openldapVolumeSize: 2Gi redisVolumSize: 2Gi + es: + elasticsearchMasterReplicas: 1 + elasticsearchDataReplicas: 1 + elasticsearchMasterVolumeSize: 4Gi + elasticsearchDataVolumeSize: 20Gi + logMaxAge: 7 + elkPrefix: logstash + # externalElasticsearchUrl: + # externalElasticsearchPort: op_minio_accesskey: openpitrixminioaccesskey diff --git a/roles/ks-logging/files/logging/elasticsearch-1.22.1.tgz b/roles/common/files/elasticsearch/elasticsearch-1.22.1.tgz similarity index 100% rename from roles/ks-logging/files/logging/elasticsearch-1.22.1.tgz rename to roles/common/files/elasticsearch/elasticsearch-1.22.1.tgz diff --git a/roles/ks-logging/files/logging/elasticsearch-curator-1.3.3.tgz b/roles/common/files/elasticsearch/elasticsearch-curator-1.3.3.tgz similarity index 100% rename from roles/ks-logging/files/logging/elasticsearch-curator-1.3.3.tgz rename to roles/common/files/elasticsearch/elasticsearch-curator-1.3.3.tgz diff --git a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/fluentbitconfig-fluentBitConfig.yaml b/roles/common/files/fluentbit-operator/fluentbit/fluentbitconfig-fluentBitConfig.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/fluentbitconfig-fluentBitConfig.yaml rename to roles/common/files/fluentbit-operator/fluentbit/fluentbitconfig-fluentBitConfig.yaml diff --git a/roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-clusterRole.yaml b/roles/common/files/fluentbit-operator/init/fluentbit-operator-clusterRole.yaml similarity index 100% rename from 
roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-clusterRole.yaml rename to roles/common/files/fluentbit-operator/init/fluentbit-operator-clusterRole.yaml diff --git a/roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-clusterRoleBinding.yaml b/roles/common/files/fluentbit-operator/init/fluentbit-operator-clusterRoleBinding.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-clusterRoleBinding.yaml rename to roles/common/files/fluentbit-operator/init/fluentbit-operator-clusterRoleBinding.yaml diff --git a/roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-filterCustomResourceDefinition.yaml b/roles/common/files/fluentbit-operator/init/fluentbit-operator-filterCustomResourceDefinition.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-filterCustomResourceDefinition.yaml rename to roles/common/files/fluentbit-operator/init/fluentbit-operator-filterCustomResourceDefinition.yaml diff --git a/roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-fluentbitCustomResourceDefinition.yaml b/roles/common/files/fluentbit-operator/init/fluentbit-operator-fluentbitCustomResourceDefinition.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-fluentbitCustomResourceDefinition.yaml rename to roles/common/files/fluentbit-operator/init/fluentbit-operator-fluentbitCustomResourceDefinition.yaml diff --git a/roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-fluentbitconfigCustomResourceDefinition.yaml b/roles/common/files/fluentbit-operator/init/fluentbit-operator-fluentbitconfigCustomResourceDefinition.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-fluentbitconfigCustomResourceDefinition.yaml rename to 
roles/common/files/fluentbit-operator/init/fluentbit-operator-fluentbitconfigCustomResourceDefinition.yaml diff --git a/roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-inputCustomResourceDefinition.yaml b/roles/common/files/fluentbit-operator/init/fluentbit-operator-inputCustomResourceDefinition.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-inputCustomResourceDefinition.yaml rename to roles/common/files/fluentbit-operator/init/fluentbit-operator-inputCustomResourceDefinition.yaml diff --git a/roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-outputCustomResourceDefinition.yaml b/roles/common/files/fluentbit-operator/init/fluentbit-operator-outputCustomResourceDefinition.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-outputCustomResourceDefinition.yaml rename to roles/common/files/fluentbit-operator/init/fluentbit-operator-outputCustomResourceDefinition.yaml diff --git a/roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-serviceAccount.yaml b/roles/common/files/fluentbit-operator/init/fluentbit-operator-serviceAccount.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-serviceAccount.yaml rename to roles/common/files/fluentbit-operator/init/fluentbit-operator-serviceAccount.yaml diff --git a/roles/ks-logging/files/logging/fluentbit-operator/migrator/migrator-rbac.yaml b/roles/common/files/fluentbit-operator/migrator/migrator-rbac.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/migrator/migrator-rbac.yaml rename to roles/common/files/fluentbit-operator/migrator/migrator-rbac.yaml diff --git a/roles/common/tasks/common-install.yaml b/roles/common/tasks/common-install.yaml index ad5900760..ee4bf9762 100644 --- a/roles/common/tasks/common-install.yaml +++ 
b/roles/common/tasks/common-install.yaml @@ -27,4 +27,14 @@ - "import.stderr and 'is forbidden' not in import.stderr" when: - openpitrix.enabled or notification.enabled or alerting.enabled - - "status.etcd is not defined or status.etcd.status is not defined or status.etcd.status != 'enabled'" \ No newline at end of file + - "status.etcd is not defined or status.etcd.status is not defined or status.etcd.status != 'enabled'" + +- import_tasks: es-install.yaml + when: + - logging.enabled or events.enabled or auditing.enabled or servicemesh.enabled + - "status.es is not defined or status.es.status is not defined or status.es.status != 'enabled'" + +- import_tasks: fluentbit-install.yaml + when: + - logging.enabled or events.enabled or auditing.enabled + - "status.fluentbit is not defined or status.fluentbit.status is not defined or status.fluentbit.status != 'enabled'" \ No newline at end of file diff --git a/roles/common/tasks/es-install.yaml b/roles/common/tasks/es-install.yaml new file mode 100644 index 000000000..4928192e4 --- /dev/null +++ b/roles/common/tasks/es-install.yaml @@ -0,0 +1,125 @@ +- name: Kubesphere | Getting elasticsearch and curator installation files + copy: + src: "elasticsearch" + dest: "{{ kubesphere_dir }}/" + + +- name: Kubesphere | Creating custom manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kubesphere_dir }}/elasticsearch/{{ item.file }}" + with_items: + - { name: custom-values-elasticsearch, file: custom-values-elasticsearch.yaml } + - { name: custom-values-elasticsearch-curator, file: custom-values-elasticsearch-curator.yaml } + + +# Update check +- block: + - name: Kubesphere | Check elasticsearch data StatefulSet + shell: > + {{ bin_dir }}/kubectl get sts -n kubesphere-logging-system | grep "elasticsearch-logging-data" | wc -l + register: sts_number + + - name: Kubesphere | Check elasticsearch storageclass + shell: > + {{ bin_dir }}/kubectl get sts -n kubesphere-logging-system elasticsearch-logging-data -o yaml | grep 
"storageClassName" | wc -l + register: sc_number + + - name: Kubesphere | Comment elasticsearch storageclass parameter + shell: > + sed -i 's/\(^.*storageClass:.*$\)/#\1/' {{ kubesphere_dir }}/elasticsearch/custom-values-elasticsearch.yaml + when: + - sts_number.stdout != "0" + - sc_number.stdout == "0" + + +# Deploy elasticsearch +- block: + - name: Kubesphere | Deploy elasticsearch-logging + shell: > + {{ bin_dir }}/helm upgrade --install elasticsearch-logging + {{ kubesphere_dir }}/elasticsearch/elasticsearch-1.22.1.tgz + -f {{ kubesphere_dir }}/elasticsearch/custom-values-elasticsearch.yaml + --namespace kubesphere-logging-system + register: es_result + ignore_errors: True + + - name: Kubesphere | Get PersistentVolume Name + shell: > + {{ bin_dir }}/kubectl get pv | grep "kubesphere-logging-system/data-elasticsearch-logging" | awk '{print $1}' + register: es_pv_name + when: + - "es_result.stderr and 'no matches for kind' in es_result.stderr" + + - name: Kubesphere | Patch PersistentVolume (persistentVolumeReclaimPolicy) + shell: > + {{ bin_dir }}/kubectl patch pv {{ item }} + -p '{"spec":{"persistentVolumeReclaimPolicy": "Retain"}}' + loop: "{{ es_pv_name.stdout_lines }}" + when: + - "es_result.stderr and 'no matches for kind' in es_result.stderr" + + - name: Kubesphere | Delete elasticsearch + shell: > + {{ bin_dir }}/helm del --purge elasticsearch-logging + when: + - "es_result.stderr and 'no matches for kind' in es_result.stderr" + + - name: Kubesphere | Waiting for seconds + wait_for: timeout=20 + when: + - "es_result.stderr and 'no matches for kind' in es_result.stderr" + + - name: Kubesphere | Deploy elasticsearch-logging + shell: > + {{ bin_dir }}/helm upgrade --install elasticsearch-logging + {{ kubesphere_dir }}/elasticsearch/elasticsearch-1.22.1.tgz + -f {{ kubesphere_dir }}/elasticsearch/custom-values-elasticsearch.yaml + --namespace kubesphere-logging-system + register: es_result + when: + - "es_result.stderr and 'no matches for kind' in es_result.stderr" + + - fail: + msg: "{{ 
es_result.stderr }}" + when: + - es_result.stderr is defined + - es_result.stderr != "" + - es_result.stderr.find("no matches for kind") == -1 + + when: + - common.es.externalElasticsearchUrl is not defined + - common.es.externalElasticsearchPort is not defined + + +# Deploy elasticsearch curator +- block: + - name: Kubesphere | Check elasticsearch-logging-curator + shell: > + {{ bin_dir }}/helm list elasticsearch-logging-curator + register: curator_check + ignore_errors: True + + + - name: Kubesphere | Deploy elasticsearch-logging-curator + shell: > + {{ bin_dir }}/helm upgrade --install elasticsearch-logging-curator + {{ kubesphere_dir }}/elasticsearch/elasticsearch-curator-1.3.3.tgz + -f {{ kubesphere_dir }}/elasticsearch/custom-values-elasticsearch-curator.yaml + --namespace kubesphere-logging-system + when: + - (curator_check.stdout.find("DEPLOYED") == -1) or (curator_check.stdout.find("5.5.4-0217") == -1) + + +# Update clusterconfig (cc) status +- name: Kubesphere | import es status + shell: > + {{ bin_dir }}/kubectl patch cc ks-installer + --type merge + -p '{"status": {"es": {"status": "enabled", "enabledTime": "{{ lookup('pipe','date +%Y-%m-%dT%H:%M:%S%Z') }}"}}}' + -n kubesphere-system + register: import + failed_when: "import.stderr and 'Warning' not in import.stderr" + until: import is succeeded + retries: 5 + delay: 3 \ No newline at end of file diff --git a/roles/common/tasks/fluentbit-install.yaml b/roles/common/tasks/fluentbit-install.yaml new file mode 100644 index 000000000..51e3f83cb --- /dev/null +++ b/roles/common/tasks/fluentbit-install.yaml @@ -0,0 +1,76 @@ +- name: Kubesphere | Getting fluentbit-operator installation files + copy: + src: "fluentbit-operator" + dest: "{{ kubesphere_dir }}/" + + +- name: Kubesphere | Creating custom manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kubesphere_dir }}/fluentbit-operator/{{ item.path }}/{{ item.file }}" + with_items: + - { path: fluentbit, file: custom-fluentbit-fluentBit.yaml 
} + - { path: init, file: custom-fluentbit-operator-deployment.yaml } + - { path: migrator, file: custom-migrator-job.yaml } + + +# Upgrade or install fluent bit operator +- name: Kubesphere | Checking kubesphere-version + shell: > + {{ bin_dir }}/kubectl describe configmap -n kubesphere-system ks-console-ae-config | grep "kubesphere:" | awk '{print $2}' + register: kubesphere_version + ignore_errors: True + + +- name: Kubesphere | Backup old fluentbit crd + shell: > + {{ bin_dir }}/kubectl get fluentbits.logging.kubesphere.io -n kubesphere-logging-system fluent-bit -o yaml > {{ kubesphere_dir }}/fluentbit-crd.yaml.bak + when: + - kubesphere_version.stdout.split('.')[0] == "2" + + +- name: Kubesphere | Deleting old fluentbit operator + shell: > + {{ bin_dir }}/kubectl delete {{ item.type }} -n kubesphere-logging-system {{ item.name }} + loop: + - { type: deploy, name: logging-fluentbit-operator } + - { type: fluentbits.logging.kubesphere.io, name: fluent-bit } + - { type: ds, name: fluent-bit } + - { type: crd, name: fluentbits.logging.kubesphere.io } + when: + - kubesphere_version.stdout.split('.')[0] == "2" + + +- name: Kubesphere | Prepare fluentbit operator setup + shell: > + {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/fluentbit-operator/init + + +- name: Kubesphere | Migrate fluentbit operator old config + shell: > + {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/fluentbit-operator/migrator + when: + - kubesphere_version.stdout.split('.')[0] == "2" + + +- name: Kubesphere | Deploy new fluentbit operator + shell: > + {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/fluentbit-operator/fluentbit + register: import + until: import is succeeded + retries: 5 + delay: 3 + + +# Update clusterconfig (cc) status +- name: Kubesphere | import fluentbit status + shell: > + {{ bin_dir }}/kubectl patch cc ks-installer + --type merge + -p '{"status": {"fluentbit": {"status": "enabled", "enabledTime": "{{ lookup('pipe','date +%Y-%m-%dT%H:%M:%S%Z') }}"}}}' + -n 
kubesphere-system + register: import + failed_when: "import.stderr and 'Warning' not in import.stderr" + until: import is succeeded + retries: 5 + delay: 3 \ No newline at end of file diff --git a/roles/common/tasks/get_old_config.yaml b/roles/common/tasks/get_old_config.yaml index e3dee7da9..b2161864a 100644 --- a/roles/common/tasks/get_old_config.yaml +++ b/roles/common/tasks/get_old_config.yaml @@ -82,4 +82,32 @@ when: - redis_ha_pvc.rc == 0 - redis_ha_pvc.stdout != "" + ignore_errors: True + +- name: Kubesphere | Check es-master PersistentVolumeClaim + shell: > + {{ bin_dir }}/kubectl get pvc -n kubesphere-logging-system data-elasticsearch-logging-discovery-0 -o jsonpath='{.status.capacity.storage}' + register: es_master_pvc + ignore_errors: True + +- name: Kubesphere | Setting es master pv size + set_fact: + es_master_pv_size: "{{ es_master_pvc.stdout }}" + when: + - es_master_pvc.rc == 0 + - es_master_pvc.stdout != "" + ignore_errors: True + +- name: Kubesphere | Check es data PersistentVolumeClaim + shell: > + {{ bin_dir }}/kubectl get pvc -n kubesphere-logging-system data-elasticsearch-logging-data-0 -o jsonpath='{.status.capacity.storage}' + register: es_data_pvc + ignore_errors: True + +- name: Kubesphere | Setting es data pv size + set_fact: + es_data_pv_size: "{{ es_data_pvc.stdout }}" + when: + - es_data_pvc.rc == 0 + - es_data_pvc.stdout != "" ignore_errors: True \ No newline at end of file diff --git a/roles/ks-logging/templates/custom-fluentbit-fluentBit.yaml.j2 b/roles/common/templates/custom-fluentbit-fluentBit.yaml.j2 similarity index 100% rename from roles/ks-logging/templates/custom-fluentbit-fluentBit.yaml.j2 rename to roles/common/templates/custom-fluentbit-fluentBit.yaml.j2 diff --git a/roles/ks-logging/templates/custom-fluentbit-operator-deployment.yaml.j2 b/roles/common/templates/custom-fluentbit-operator-deployment.yaml.j2 similarity index 100% rename from roles/ks-logging/templates/custom-fluentbit-operator-deployment.yaml.j2 rename to 
roles/common/templates/custom-fluentbit-operator-deployment.yaml.j2 diff --git a/roles/ks-logging/templates/custom-migrator-job.yaml.j2 b/roles/common/templates/custom-migrator-job.yaml.j2 similarity index 100% rename from roles/ks-logging/templates/custom-migrator-job.yaml.j2 rename to roles/common/templates/custom-migrator-job.yaml.j2 diff --git a/roles/ks-logging/templates/custom-values-elasticsearch-curator.yaml.j2 b/roles/common/templates/custom-values-elasticsearch-curator.yaml.j2 similarity index 87% rename from roles/ks-logging/templates/custom-values-elasticsearch-curator.yaml.j2 rename to roles/common/templates/custom-values-elasticsearch-curator.yaml.j2 index 4fb780ba2..88ee60f1e 100644 --- a/roles/ks-logging/templates/custom-values-elasticsearch-curator.yaml.j2 +++ b/roles/common/templates/custom-values-elasticsearch-curator.yaml.j2 @@ -48,7 +48,7 @@ configMaps: direction: older timestring: '%Y.%m.%d' unit: days - unit_count: {{ logging.logMaxAge }} + unit_count: {{ common.es.logMaxAge }} field: stats_result: epoch: @@ -58,9 +58,9 @@ configMaps: --- client: hosts: - - {% if logging.externalElasticsearchUrl is defined %}{{ logging.externalElasticsearchUrl }}{% else %}elasticsearch-logging-data.kubesphere-logging-system.svc{% endif %} + - {% if common.es.externalElasticsearchUrl is defined %}{{ common.es.externalElasticsearchUrl }}{% else %}elasticsearch-logging-data.kubesphere-logging-system.svc{% endif %} - port: {% if logging.externalElasticsearchPort is defined %}{{ logging.externalElasticsearchPort }}{% else %}9200{% endif %} + port: {% if common.es.externalElasticsearchPort is defined %}{{ common.es.externalElasticsearchPort }}{% else %}9200{% endif %} # url_prefix: # use_ssl: True diff --git a/roles/ks-logging/templates/custom-values-elasticsearch.yaml.j2 b/roles/common/templates/custom-values-elasticsearch.yaml.j2 similarity index 95% rename from roles/ks-logging/templates/custom-values-elasticsearch.yaml.j2 rename to 
roles/common/templates/custom-values-elasticsearch.yaml.j2 index 35f16e162..cb0830c9e 100644 --- a/roles/ks-logging/templates/custom-values-elasticsearch.yaml.j2 +++ b/roles/common/templates/custom-values-elasticsearch.yaml.j2 @@ -72,14 +72,14 @@ cluster: master: name: master exposeHttp: false - replicas: {{ logging.elasticsearchMasterReplicas }} + replicas: {{ common.es.elasticsearchMasterReplicas }} heapSize: "512m" # additionalJavaOpts: "-XX:MaxRAM=512m" persistence: enabled: true accessMode: ReadWriteOnce name: data - size: {% if es_master_pv_size is defined %}{{ es_master_pv_size }}{% else %}{{ logging.elasticsearchMasterVolumeSize }}{% endif %} + size: {% if es_master_pv_size is defined %}{{ es_master_pv_size }}{% else %}{{ common.es.elasticsearchMasterVolumeSize }}{% endif %} {% if persistence.storageClass is defined and persistence.storageClass != "" %} storageClass: "{{ persistence.storageClass }}" @@ -124,14 +124,14 @@ data: serviceType: ClusterIP loadBalancerIP: {} loadBalancerSourceRanges: {} - replicas: {{ logging.elasticsearchDataReplicas }} + replicas: {{ common.es.elasticsearchDataReplicas }} heapSize: "1536m" # additionalJavaOpts: "-XX:MaxRAM=1536m" persistence: enabled: true accessMode: ReadWriteOnce name: data - size: {% if es_data_pv_size is defined %}{{ es_data_pv_size }}{% else %}{{ logging.elasticsearchDataVolumeSize }}{% endif %} + size: {% if es_data_pv_size is defined %}{{ es_data_pv_size }}{% else %}{{ common.es.elasticsearchDataVolumeSize }}{% endif %} {% if persistence.storageClass is defined and persistence.storageClass != "" %} storageClass: "{{ persistence.storageClass }}" diff --git a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/filter-auditing.yaml b/roles/ks-auditing/files/fluentbit-operator/filter-auditing.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/filter-auditing.yaml rename to roles/ks-auditing/files/fluentbit-operator/filter-auditing.yaml diff --git 
a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/input-tail-auditing.yaml b/roles/ks-auditing/files/fluentbit-operator/input-auditing.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/input-tail-auditing.yaml rename to roles/ks-auditing/files/fluentbit-operator/input-auditing.yaml diff --git a/roles/ks-auditing/tasks/fluentbit-operator.yaml b/roles/ks-auditing/tasks/fluentbit-operator.yaml new file mode 100644 index 000000000..a39196085 --- /dev/null +++ b/roles/ks-auditing/tasks/fluentbit-operator.yaml @@ -0,0 +1,16 @@ +- name: ks-auditing | Getting logging installation files + copy: + src: "fluentbit-operator" + dest: "{{ kubesphere_dir }}/" + + +- name: ks-auditing | Creating manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kubesphere_dir }}/fluentbit-operator/{{ item.file }}" + with_items: + - { name: custom-output-elasticsearch-auditing, file: custom-output-elasticsearch-auditing.yaml } + +- name: ks-auditing | Apply fluentbit operator custom resources + shell: > + {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/fluentbit-operator \ No newline at end of file diff --git a/roles/ks-auditing/tasks/main.yaml b/roles/ks-auditing/tasks/main.yaml index a02959ddc..7eacc160e 100644 --- a/roles/ks-auditing/tasks/main.yaml +++ b/roles/ks-auditing/tasks/main.yaml @@ -35,6 +35,9 @@ - (auditing_check.stdout.find("DEPLOYED") == -1) or (auditing_check.stdout.find("v0.1.0") == -1) +- import_tasks: fluentbit-operator.yaml + + - name: ks-auditing | import ks-auditing status shell: > {{ bin_dir }}/kubectl patch cc ks-installer diff --git a/roles/ks-auditing/templates/custom-output-elasticsearch-auditing.yaml.j2 b/roles/ks-auditing/templates/custom-output-elasticsearch-auditing.yaml.j2 new file mode 100644 index 000000000..61a7d0732 --- /dev/null +++ b/roles/ks-auditing/templates/custom-output-elasticsearch-auditing.yaml.j2 @@ -0,0 +1,16 @@ +apiVersion: logging.kubesphere.io/v1alpha2 +kind: Output 
+metadata: + name: es-auditing + namespace: kubesphere-logging-system + labels: + logging.kubesphere.io/enabled: "true" + logging.kubesphere.io/component: "auditing" +spec: + match: kube_auditing + es: + logstashFormat: true + host: "{% if common.es.externalElasticsearchUrl is defined %}{{ common.es.externalElasticsearchUrl }}{% else %}elasticsearch-logging-data.kubesphere-logging-system.svc{% endif %}" + logstashPrefix: "ks-{{ common.es.elkPrefix }}-auditing" + port: {% if common.es.externalElasticsearchPort is defined %}{{ common.es.externalElasticsearchPort }}{% else %}9200{% endif %} + diff --git a/roles/ks-core/config/templates/kubesphere-config.yaml.j2 b/roles/ks-core/config/templates/kubesphere-config.yaml.j2 index c2ace7df9..4c3de7d82 100644 --- a/roles/ks-core/config/templates/kubesphere-config.yaml.j2 +++ b/roles/ks-core/config/templates/kubesphere-config.yaml.j2 @@ -96,33 +96,33 @@ data: {% if logging.enabled is defined and logging.enabled == true %} logging: -{% if logging.externalElasticsearchUrl is defined and logging.externalElasticsearchPort is defined %} - host: http://{{ logging.externalElasticsearchUrl }}:{{ logging.externalElasticsearchPort }} +{% if common.es.externalElasticsearchUrl is defined and common.es.externalElasticsearchPort is defined %} + host: http://{{ common.es.externalElasticsearchUrl }}:{{ common.es.externalElasticsearchPort }} {% else %} host: http://elasticsearch-logging-data.kubesphere-logging-system.svc:9200 {% endif %} - indexPrefix: ks-{{ logging.elkPrefix }}-log + indexPrefix: ks-{{ common.es.elkPrefix }}-log {% endif %} {% if events.enabled is defined and events.enabled == true %} events: -{% if logging.externalElasticsearchUrl is defined and logging.externalElasticsearchPort is defined %} - host: http://{{ logging.externalElasticsearchUrl }}:{{ logging.externalElasticsearchPort }} +{% if common.es.externalElasticsearchUrl is defined and common.es.externalElasticsearchPort is defined %} + host: http://{{ 
common.es.externalElasticsearchUrl }}:{{ common.es.externalElasticsearchPort }} {% else %} host: http://elasticsearch-logging-data.kubesphere-logging-system.svc:9200 {% endif %} - indexPrefix: ks-{{ logging.elkPrefix }}-events + indexPrefix: ks-{{ common.es.elkPrefix }}-events {% endif %} {% if auditing.enabled is defined and auditing.enabled == true %} auditing: enable: true -{% if logging.externalElasticsearchUrl is defined and logging.externalElasticsearchPort is defined %} - host: http://{{ logging.externalElasticsearchUrl }}:{{ logging.externalElasticsearchPort }} +{% if common.es.externalElasticsearchUrl is defined and common.es.externalElasticsearchPort is defined %} + host: http://{{ common.es.externalElasticsearchUrl }}:{{ common.es.externalElasticsearchPort }} {% else %} host: http://elasticsearch-logging-data.kubesphere-logging-system.svc:9200 {% endif %} - indexPrefix: ks-{{ logging.elkPrefix }}-auditing + indexPrefix: ks-{{ common.es.elkPrefix }}-auditing {% endif %} {% if alerting.enabled is defined and alerting.enabled == true %} diff --git a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/filter-events.yaml b/roles/ks-events/files/fluentbit-operator/filter-events.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/filter-events.yaml rename to roles/ks-events/files/fluentbit-operator/filter-events.yaml diff --git a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/input-tail-events.yaml b/roles/ks-events/files/fluentbit-operator/input-events.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/input-tail-events.yaml rename to roles/ks-events/files/fluentbit-operator/input-events.yaml diff --git a/roles/ks-events/tasks/fluentbit-operator.yaml b/roles/ks-events/tasks/fluentbit-operator.yaml new file mode 100644 index 000000000..6854728a6 --- /dev/null +++ b/roles/ks-events/tasks/fluentbit-operator.yaml @@ -0,0 +1,16 @@ +- name: 
ks-events | Getting logging installation files + copy: + src: "fluentbit-operator" + dest: "{{ kubesphere_dir }}/" + + +- name: ks-events | Creating manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kubesphere_dir }}/fluentbit-operator/{{ item.file }}" + with_items: + - { name: custom-output-elasticsearch-events, file: custom-output-elasticsearch-events.yaml } + +- name: ks-events | Apply fluentbit operator custom resources + shell: > + {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/fluentbit-operator \ No newline at end of file diff --git a/roles/ks-events/tasks/main.yaml b/roles/ks-events/tasks/main.yaml index 210dec6bd..4a808afe4 100644 --- a/roles/ks-events/tasks/main.yaml +++ b/roles/ks-events/tasks/main.yaml @@ -35,6 +35,9 @@ - (events_check.stdout.find("DEPLOYED") == -1) or (events_check.stdout.find("v0.1.0") == -1) +- import_tasks: fluentbit-operator.yaml + + - name: ks-events | import ks-events status shell: > {{ bin_dir }}/kubectl patch cc ks-installer diff --git a/roles/ks-events/templates/custom-output-elasticsearch-events.yaml.j2 b/roles/ks-events/templates/custom-output-elasticsearch-events.yaml.j2 new file mode 100644 index 000000000..97a03538d --- /dev/null +++ b/roles/ks-events/templates/custom-output-elasticsearch-events.yaml.j2 @@ -0,0 +1,15 @@ +apiVersion: logging.kubesphere.io/v1alpha2 +kind: Output +metadata: + name: es-events + namespace: kubesphere-logging-system + labels: + logging.kubesphere.io/enabled: "true" + logging.kubesphere.io/component: "events" +spec: + match: kube_events + es: + logstashFormat: true + host: "{% if common.es.externalElasticsearchUrl is defined %}{{ common.es.externalElasticsearchUrl }}{% else %}elasticsearch-logging-data.kubesphere-logging-system.svc{% endif %}" + logstashPrefix: "ks-{{ common.es.elkPrefix }}-events" + port: {% if common.es.externalElasticsearchPort is defined %}{{ common.es.externalElasticsearchPort }}{% else %}9200{% endif %} diff --git 
a/roles/ks-istio/templates/jaeger-production.yaml.j2 b/roles/ks-istio/templates/jaeger-production.yaml.j2 index 466dd4872..be339b648 100644 --- a/roles/ks-istio/templates/jaeger-production.yaml.j2 +++ b/roles/ks-istio/templates/jaeger-production.yaml.j2 @@ -17,9 +17,9 @@ spec: enabled: true options: es: - index-prefix: {{ logging.elkPrefix }} -{% if logging.externalElasticsearchUrl is defined and logging.externalElasticsearchPort is defined %} - server-urls: http://{{ logging.externalElasticsearchUrl }}:{{ logging.externalElasticsearchPort }} + index-prefix: {{ common.es.elkPrefix }} +{% if common.es.externalElasticsearchUrl is defined and common.es.externalElasticsearchPort is defined %} + server-urls: http://{{ common.es.externalElasticsearchUrl }}:{{ common.es.externalElasticsearchPort }} {% else %} server-urls: http://elasticsearch-logging-data.kubesphere-logging-system.svc:9200 {% endif %} diff --git a/roles/ks-logging/defaults/main.yaml b/roles/ks-logging/defaults/main.yaml index e75615d88..cf7d7549c 100644 --- a/roles/ks-logging/defaults/main.yaml +++ b/roles/ks-logging/defaults/main.yaml @@ -1,14 +1,5 @@ --- logging: - elasticsearchMasterReplicas: 1 - elasticsearchDataReplicas: 2 logsidecarReplicas: 2 - elasticsearchVolumeSize: 20Gi - logMaxAge: 7 - elkPrefix: logstash - kibana: - enable: false -# externalElasticsearchUrl: -# externalElasticsearchPort: diff --git a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/filter-kubernetes.yaml b/roles/ks-logging/files/fluentbit-operator/filter-logging.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/filter-kubernetes.yaml rename to roles/ks-logging/files/fluentbit-operator/filter-logging.yaml diff --git a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/input-tail.yaml b/roles/ks-logging/files/fluentbit-operator/input-logging.yaml similarity index 100% rename from roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/input-tail.yaml 
rename to roles/ks-logging/files/fluentbit-operator/input-logging.yaml diff --git a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/fluentbit-fluentBit.yaml b/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/fluentbit-fluentBit.yaml deleted file mode 100644 index f8a22830e..000000000 --- a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/fluentbit-fluentBit.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: logging.kubesphere.io/v1alpha2 -kind: FluentBit -metadata: - name: fluent-bit - namespace: kubesphere-logging-system - labels: - app.kubernetes.io/name: fluent-bit -spec: - image: kubespheredev/fluent-bit:v1.5.0 - positionDB: - emptyDir: {} - fluentBitConfigName: fluent-bit-config - tolerations: - - operator: Exists diff --git a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/output-elasticsearch-auditing.yaml b/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/output-elasticsearch-auditing.yaml deleted file mode 100644 index 5c0cccd03..000000000 --- a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/output-elasticsearch-auditing.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: logging.kubesphere.io/v1alpha2 -kind: Output -metadata: - labels: - logging.kubesphere.io/enabled: 'true' - logging.kubesphere.io/component: "auditing" - name: es-auditing - namespace: kubesphere-logging-system -spec: - es: - host: elasticsearch-logging-data.kubesphere-logging-system.svc - logstashFormat: true - logstashPrefix: ks-logstash-auditing - port: 9200 - match: kube_auditing - diff --git a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/output-elasticsearch-events.yaml b/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/output-elasticsearch-events.yaml deleted file mode 100644 index 79674b9a5..000000000 --- a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/output-elasticsearch-events.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: logging.kubesphere.io/v1alpha2 -kind: 
Output -metadata: - name: es-events - namespace: kubesphere-logging-system - labels: - logging.kubesphere.io/enabled: "true" - logging.kubesphere.io/component: "events" -spec: - match: kube_events - es: - host: elasticsearch-logging-data.kubesphere-logging-system.svc - port: 9200 - logstashPrefix: ks-logstash-events - logstashFormat: true \ No newline at end of file diff --git a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/output-elasticsearch.yaml b/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/output-elasticsearch.yaml deleted file mode 100644 index a2bf21790..000000000 --- a/roles/ks-logging/files/logging/fluentbit-operator/fluent-bit/output-elasticsearch.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: logging.kubesphere.io/v1alpha2 -kind: Output -metadata: - name: es - namespace: kubesphere-logging-system - labels: - logging.kubesphere.io/enabled: "true" - logging.kubesphere.io/component: "logging" -spec: - match: kube.* - es: - host: elasticsearch-logging-data.kubesphere-logging-system.svc - port: 9200 - logstashPrefix: ks-logstash-log - logstashFormat: true - timeKey: "@timestamp" \ No newline at end of file diff --git a/roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-deployment.yaml b/roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-deployment.yaml deleted file mode 100644 index 41158797e..000000000 --- a/roles/ks-logging/files/logging/fluentbit-operator/init/fluentbit-operator-deployment.yaml +++ /dev/null @@ -1,53 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: fluentbit-operator - namespace: kubesphere-logging-system - labels: - app.kubernetes.io/component: operator - app.kubernetes.io/name: fluentbit-operator -spec: - selector: - matchLabels: - app.kubernetes.io/component: operator - app.kubernetes.io/name: fluentbit-operator - template: - metadata: - labels: - app.kubernetes.io/component: operator - app.kubernetes.io/name: fluentbit-operator - spec: - 
volumes: - - name: env - emptyDir: {} - - name: dockersock - hostPath: - path: /var/run/docker.sock - initContainers: - - name: setenv - image: 'docker:19.03' - command: - - /bin/sh - - '-c' - - set -ex; - echo DOCKER_ROOT_DIR=$(docker info -f '{{.DockerRootDir}}') > /fluentbit-operator/fluent-bit.env - volumeMounts: - - name: env - mountPath: /fluentbit-operator - - name: dockersock - readOnly: true - mountPath: /var/run/docker.sock - containers: - - name: fluentbit-operator - image: 'kubespheredev/fluentbit-operator:v0.2.0' - resources: - limits: - cpu: 100m - memory: 30Mi - requests: - cpu: 100m - memory: 20Mi - volumeMounts: - - name: env - mountPath: /fluentbit-operator - serviceAccountName: fluentbit-operator diff --git a/roles/ks-logging/files/logging/fluentbit-operator/migrator/migrator-job.yaml b/roles/ks-logging/files/logging/fluentbit-operator/migrator/migrator-job.yaml deleted file mode 100644 index d6e4f9f50..000000000 --- a/roles/ks-logging/files/logging/fluentbit-operator/migrator/migrator-job.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: fluentbit-operator-migrator - namespace: kubesphere-logging-system -spec: - template: - spec: - containers: - - name: migrator - image: kubespheredev/fluentbit-operator:migrator - restartPolicy: Never - serviceAccountName: fluentbit-operator-migrator \ No newline at end of file diff --git a/roles/ks-logging/files/logging/kibana-2.2.1.tgz b/roles/ks-logging/files/logging/kibana-2.2.1.tgz deleted file mode 100644 index 114a20613..000000000 Binary files a/roles/ks-logging/files/logging/kibana-2.2.1.tgz and /dev/null differ diff --git a/roles/ks-logging/files/logging/nginx-1.0.0.tgz b/roles/ks-logging/files/logging/nginx-1.0.0.tgz deleted file mode 100644 index 8d526909f..000000000 Binary files a/roles/ks-logging/files/logging/nginx-1.0.0.tgz and /dev/null differ diff --git a/roles/ks-logging/tasks/elasticsearch.yaml b/roles/ks-logging/tasks/elasticsearch.yaml deleted file mode 
100644 index 989dbbb69..000000000 --- a/roles/ks-logging/tasks/elasticsearch.yaml +++ /dev/null @@ -1,53 +0,0 @@ ---- -- name: ks-logging | Deploy elasticsearch-logging - shell: > - {{ bin_dir }}/helm upgrade --install elasticsearch-logging - {{ kubesphere_dir }}/logging/elasticsearch-1.22.1.tgz - -f {{ kubesphere_dir }}/logging/custom-values-elasticsearch.yaml - --namespace kubesphere-logging-system - register: es_result - ignore_errors: True - - -- name: ks-logging | Get PersistentVolume Name - shell: > - kubectl get pv | grep "kubesphere-logging-system/data-elasticsearch-logging" | awk '{print $1}' - register: es_pv_name - when: - - "es_result.stderr and 'no matches for kind' in es_result.stderr" - -- name: ks-logging | Patch PersistentVolume (persistentVolumeReclaimPolicy) - shell: > - kubectl patch pv {{ item }} - -p '{"spec":{"persistentVolumeReclaimPolicy": "Retain"}}' - loop: "{{ es_pv_name.stdout_lines }}" - when: - - "es_result.stderr and 'no matches for kind' in es_result.stderr" - -- name: ks-logging | Delete elasticsearch - shell: > - {{ bin_dir }}/helm del --purge elasticsearch-logging - when: - - "es_result.stderr and 'no matches for kind' in es_result.stderr" - -- name: ks-logging | Waiting for seconds - wait_for: timeout=20 - when: - - "es_result.stderr and 'no matches for kind' in es_result.stderr" - -- name: ks-logging | Deploy elasticsearch-logging - shell: > - {{ bin_dir }}/helm upgrade --install elasticsearch-logging - {{ kubesphere_dir }}/logging/elasticsearch-1.22.1.tgz - -f {{ kubesphere_dir }}/logging/custom-values-elasticsearch.yaml - --namespace kubesphere-logging-system - register: es_result - when: - - "es_result.stderr and 'no matches for kind' in es_result.stderr" - -- fail: - msg: "{{ es_result.stderr }}" - when: - - es_result.stderr is defined - - es_result.stderr != "" - - es_result.stderr.find("no matches for kind") == -1 diff --git a/roles/ks-logging/tasks/get_old_config.yaml b/roles/ks-logging/tasks/get_old_config.yaml deleted 
file mode 100644 index fe9d34936..000000000 --- a/roles/ks-logging/tasks/get_old_config.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: Logging | Check es-master PersistentVolumeClaim - shell: > - {{ bin_dir }}/kubectl get pvc -n kubesphere-logging-system data-elasticsearch-logging-discovery-0 -o jsonpath='{.status.capacity.storage}' - register: es_master_pvc - ignore_errors: True - -- name: Logging | Setting es master pv size - set_fact: - es_master_pv_size: "{{ es_master_pvc.stdout }}" - when: - - es_master_pvc.rc == 0 - - es_master_pvc.stdout != "" - ignore_errors: True - -- name: Logging | Check es data PersistentVolumeClaim - shell: > - {{ bin_dir }}/kubectl get pvc -n kubesphere-logging-system data-elasticsearch-logging-data-0 -o jsonpath='{.status.capacity.storage}' - register: es_data_pvc - ignore_errors: True - -- name: Logging | Setting es data pv size - set_fact: - es_data_pv_size: "{{ es_data_pvc.stdout }}" - when: - - es_data_pvc.rc == 0 - - es_data_pvc.stdout != "" - ignore_errors: True \ No newline at end of file diff --git a/roles/ks-logging/tasks/main.yaml b/roles/ks-logging/tasks/main.yaml index 89e4179e4..3659484c5 100644 --- a/roles/ks-logging/tasks/main.yaml +++ b/roles/ks-logging/tasks/main.yaml @@ -1,188 +1,20 @@ --- -- import_tasks: pre-install.yaml - - name: ks-logging | Getting logging installation files copy: - src: "logging" + src: "fluentbit-operator" dest: "{{ kubesphere_dir }}/" -- import_tasks: get_old_config.yaml - name: ks-logging | Creating manifests template: src: "{{ item.file }}.j2" - dest: "{{ kubesphere_dir }}/logging/{{ item.file }}" + dest: "{{ kubesphere_dir }}/fluentbit-operator/{{ item.file }}" with_items: - - { path: logging, file: custom-fluentbit-fluentBit.yaml } - - { path: logging, file: custom-fluentbit-operator-deployment.yaml } - - { path: logging, file: custom-migrator-job.yaml } - - { path: logging, file: custom-output-elasticsearch.yaml } - - { path: logging, file: custom-values-elasticsearch.yaml } - - { 
path: logging, file: custom-values-elasticsearch-curator.yaml } - - { path: logging, file: custom-values-kibana.yaml } - - { path: logging, file: custom-output-elasticsearch-events.yaml } - - { path: logging, file: custom-output-elasticsearch-auditing.yaml } - - -- name: ks-logging | comment elasticsearch storageclass parameter - shell: > - sed -i 's/\(^.*storageClass:.*$\)/#\1/' {{ kubesphere_dir }}/logging/custom-values-elasticsearch.yaml - when: - - sts_number.stdout != "0" - - sc_number.stdout == "0" - - -- name: ks-logging | Checking elasticsearch-logging-data-svc - shell: > - {{ bin_dir }}/kubectl get svc -n kubesphere-logging-system - register: elasticsearch_logging_data_svc - ignore_errors: True - - -- name: ks-logging | Checking kubesphere-version - shell: > - {{ bin_dir }}/kubectl describe configmap -n kubesphere-system ks-console-ae-config | grep "kubesphere:" | awk '{print $2}' - register: kubesphere_version - ignore_errors: True - - -- name: ks-logging | Changing logging index - shell: > - index_name=logstash-$(date +"%Y.%m.%d" -d "-{{ item }} days"); - index_alias=ks-logstash-log-$(date +"%Y.%m.%d" -d "-{{ item }} days"); - curl -X POST "$({{ bin_dir }}/kubectl get svc elasticsearch-logging-data -n kubesphere-logging-system -o=jsonpath='{.spec.clusterIP}'):9200/_aliases" - -H 'Content-Type: application/json' - -d '{ "actions" : [ { "add" : { "index" : "'"$index_name"'", "alias" : "'"$index_alias"'" } } ] }' - with_sequence: start=0 end=6 stride=1 - when: - - elasticsearch_logging_data_svc.stdout.find("elasticsearch-logging-data") != -1 - - kubesphere_version.stdout == "2.0.0" - ignore_errors: True - - -- name: ks-logging | Backup old fluentbit crd - shell: > - {{ bin_dir }}/kubectl get fluentbits.logging.kubesphere.io -n kubesphere-logging-system fluent-bit -o yaml > {{ kubesphere_dir }}/logging/fluentbit-crd.yaml.bak - when: - - kubesphere_version.stdout.split('.')[0] == "2" - - -- name: ks-logging | Deleting old fluentbit operator - shell: > - {{ 
bin_dir }}/kubectl delete {{ item.type }} -n kubesphere-logging-system {{ item.name }} - loop: - - { type: deploy, name: logging-fluentbit-operator } - - { type: fluentbits.logging.kubesphere.io, name: fluent-bit } - - { type: ds, name: fluent-bit } - - { type: crd, name: fluentbits.logging.kubesphere.io } - when: - - kubesphere_version.stdout.split('.')[0] == "2" - - -- name: ks-logging | Prepare fluentbit operator setup - shell: > - {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/logging/fluentbit-operator/init - + - { name: custom-output-elasticsearch-logging, file: custom-output-elasticsearch-logging.yaml } -- name: ks-logging | Migrate fluentbit operator old config +- name: ks-logging | Apply fluentbit operator custom resources shell: > - {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/{{ item.path }}/{{ item.file }} - loop: - - {path: logging/fluentbit-operator/migrator, file: migrator-rbac.yaml} - - {path: logging, file: custom-migrator-job.yaml} - when: - - kubesphere_version.stdout.split('.')[0] == "2" - - -- name: ks-logging | Deploy new fluentbit operator - shell: > - {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/{{ item.path }}/{{ item.file }} - loop: - - {path: logging/fluentbit-operator, file: fluent-bit} - - {path: logging, file: custom-output-elasticsearch.yaml} - - {path: logging, file: custom-output-elasticsearch-events.yaml} - - {path: logging, file: custom-output-elasticsearch-auditing.yaml} - register: import - until: import is succeeded - retries: 5 - delay: 3 - -- name: ks-logging | Check elasticsearch - shell: > - {{ bin_dir }}/helm list elasticsearch-logging - register: es_check - ignore_errors: True - - -- import_tasks: elasticsearch.yaml - when: - - logging.externalElasticsearchUrl is not defined - - logging.externalElasticsearchPort is not defined - - (es_check.stdout.find("DEPLOYED") == -1) or (es_check.stdout.find("6.7.0-0217") == -1) - - -- name: ks-logging | Check elasticsearch-logging-curator - shell: > - {{ bin_dir 
}}/helm list elasticsearch-logging-curator - register: curator_check - ignore_errors: True - - -- name: ks-logging | Deploy elasticsearch-logging-curator - shell: > - {{ bin_dir }}/helm upgrade --install elasticsearch-logging-curator - {{ kubesphere_dir }}/logging/elasticsearch-curator-1.3.3.tgz - -f {{ kubesphere_dir }}/logging/custom-values-elasticsearch-curator.yaml - --namespace kubesphere-logging-system - when: - - (curator_check.stdout.find("DEPLOYED") == -1) or (curator_check.stdout.find("5.5.4-0217") == -1) - - -- block: - - name: ks-logging | Check elasticsearch-logging-kibana - shell: > - {{ bin_dir }}/helm list elasticsearch-logging-kibana - register: kibana_check - ignore_errors: True - - - name: ks-logging | Deploy elasticsearch-logging-kibana - shell: > - {{ bin_dir }}/helm upgrade --install elasticsearch-logging-kibana - {{ kubesphere_dir }}/logging/kibana-2.2.1.tgz - -f {{ kubesphere_dir }}/logging/custom-values-kibana.yaml - --namespace kubesphere-logging-system - register: kibana_result - ignore_errors: True - when: - - (kibana_check.stdout.find("DEPLOYED") == -1) or (kibana_check.stdout.find("6.7.0-0217") == -1) - - - name: ks-logging | Delete elasticsearch-logging-kibana - shell: > - {{ bin_dir }}/helm del --purge elasticsearch-logging-kibana - ignore_errors: True - when: - - (kibana_check.stdout.find("DEPLOYED") == -1) or (kibana_check.stdout.find("6.7.0-0217") == -1) - - "kibana_result.stderr and 'no matches for kind' in kibana_result.stderr" - - - name: ks-logging | Waiting for seconds - wait_for: timeout=15 - when: - - (kibana_check.stdout.find("DEPLOYED") == -1) or (kibana_check.stdout.find("6.7.0-0217") == -1) - - "kibana_result.stderr and 'no matches for kind' in kibana_result.stderr" - - - name: ks-logging | Deploy elasticsearch-logging-kibana - shell: > - {{ bin_dir }}/helm upgrade --install elasticsearch-logging-kibana - {{ kubesphere_dir }}/logging/kibana-2.2.1.tgz - -f {{ kubesphere_dir }}/logging/custom-values-kibana.yaml - 
--namespace kubesphere-logging-system - when: - - (kibana_check.stdout.find("deployed") == -1) or (kibana_check.stdout.find("6.7.0-0217") == -1) - - "kibana_result.stderr and 'no matches for kind' in kibana_result.stderr" - when: - - logging.kibana is defined - - logging.kibana.enabled is defined - - logging.kibana.enabled == true + {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/fluentbit-operator - import_tasks: logsidecar.yaml diff --git a/roles/ks-logging/tasks/pre-install.yaml b/roles/ks-logging/tasks/pre-install.yaml deleted file mode 100644 index 2b754bb70..000000000 --- a/roles/ks-logging/tasks/pre-install.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- - -- name: ks-logging | Checking elasticsearch-logging - shell: > - {{ bin_dir }}/helm list | grep "elasticsearch-logging" | grep "elasticsearch-1.10.2" - register: check_es_result - ignore_errors: True - tags: - - elasticsearch - - -- name: ks-logging | Remove old elasticsearch - shell: "{{ bin_dir }}/helm del --purge elasticsearch-logging" - when: - - check_es_result.stdout.find("kubesphere-logging-system") != -1 - ignore_errors: True - tags: - - elasticsearch - -- name: ks-logging | check elasticsearch data StatefulSet - shell: > - {{ bin_dir }}/kubectl get sts -n kubesphere-logging-system | grep "elasticsearch-logging-data" | wc -l - register: sts_number - -- name: ks-logging | check elasticsearch storageclass - shell: > - {{ bin_dir }}/kubectl get sts -n kubesphere-logging-system - elasticsearch-logging-data -o yaml | grep "storageClassName" | wc -l - register: sc_number - -- name: ks-logging | set elasticsearch storageclass parameter - set_fact: - persistence: - storageClass: "" - when: - - sts_number.stdout != "0" - - sc_number.stdout == "0" diff --git a/roles/ks-logging/templates/custom-output-elasticsearch-auditing.yaml.j2 b/roles/ks-logging/templates/custom-output-elasticsearch-auditing.yaml.j2 deleted file mode 100644 index a6be70016..000000000 --- 
a/roles/ks-logging/templates/custom-output-elasticsearch-auditing.yaml.j2 +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: logging.kubesphere.io/v1alpha2 -kind: Output -metadata: - name: es-auditing - namespace: kubesphere-logging-system - labels: - logging.kubesphere.io/enabled: "true" - logging.kubesphere.io/component: "auditing" -spec: - match: kube_auditing - es: - logstashFormat: true - host: "{% if logging.externalElasticsearchUrl is defined %}{{ logging.externalElasticsearchUrl }}{% else %}elasticsearch-logging-data.kubesphere-logging-system.svc{% endif %}" - logstashPrefix: "ks-{{ logging.elkPrefix }}-auditing" - port: {% if logging.externalElasticsearchPort is defined %}{{ logging.externalElasticsearchPort }}{% else %}9200{% endif %} - diff --git a/roles/ks-logging/templates/custom-output-elasticsearch-events.yaml.j2 b/roles/ks-logging/templates/custom-output-elasticsearch-events.yaml.j2 deleted file mode 100644 index 603d33ebf..000000000 --- a/roles/ks-logging/templates/custom-output-elasticsearch-events.yaml.j2 +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: logging.kubesphere.io/v1alpha2 -kind: Output -metadata: - name: es-events - namespace: kubesphere-logging-system - labels: - logging.kubesphere.io/enabled: "true" - logging.kubesphere.io/component: "events" -spec: - match: kube_events - es: - logstashFormat: true - host: "{% if logging.externalElasticsearchUrl is defined %}{{ logging.externalElasticsearchUrl }}{% else %}elasticsearch-logging-data.kubesphere-logging-system.svc{% endif %}" - logstashPrefix: "ks-{{ logging.elkPrefix }}-events" - port: {% if logging.externalElasticsearchPort is defined %}{{ logging.externalElasticsearchPort }}{% else %}9200{% endif %} diff --git a/roles/ks-logging/templates/custom-output-elasticsearch-logging.yaml.j2 b/roles/ks-logging/templates/custom-output-elasticsearch-logging.yaml.j2 new file mode 100644 index 000000000..f9fe95228 --- /dev/null +++ b/roles/ks-logging/templates/custom-output-elasticsearch-logging.yaml.j2 @@ 
-0,0 +1,17 @@ +apiVersion: logging.kubesphere.io/v1alpha2 +kind: Output +metadata: + name: es + namespace: kubesphere-logging-system + labels: + logging.kubesphere.io/enabled: "true" + logging.kubesphere.io/component: "logging" +spec: + match: kube.* + es: + host: "{% if common.es.externalElasticsearchUrl is defined %}{{ common.es.externalElasticsearchUrl }}{% else %}elasticsearch-logging-data.kubesphere-logging-system.svc{% endif %}" + port: {% if common.es.externalElasticsearchPort is defined %}{{ common.es.externalElasticsearchPort }}{% else %}9200{% endif %} + + logstashPrefix: "ks-{{ common.es.elkPrefix }}-log" + logstashFormat: true + timeKey: "@timestamp" diff --git a/roles/ks-logging/templates/custom-output-elasticsearch.yaml.j2 b/roles/ks-logging/templates/custom-output-elasticsearch.yaml.j2 deleted file mode 100644 index 8905091fa..000000000 --- a/roles/ks-logging/templates/custom-output-elasticsearch.yaml.j2 +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: logging.kubesphere.io/v1alpha2 -kind: Output -metadata: - name: es - namespace: kubesphere-logging-system - labels: - logging.kubesphere.io/enabled: "true" - logging.kubesphere.io/component: "logging" -spec: - match: kube.* - es: - host: "{% if logging.externalElasticsearchUrl is defined %}{{ logging.externalElasticsearchUrl }}{% else %}elasticsearch-logging-data.kubesphere-logging-system.svc{% endif %}" - port: {% if logging.externalElasticsearchPort is defined %}{{ logging.externalElasticsearchPort }}{% else %}9200{% endif %} - - logstashPrefix: "ks-{{ logging.elkPrefix }}-log" - logstashFormat: true - timeKey: "@timestamp" diff --git a/roles/ks-logging/templates/custom-values-kibana.yaml.j2 b/roles/ks-logging/templates/custom-values-kibana.yaml.j2 deleted file mode 100644 index 345e4c0ad..000000000 --- a/roles/ks-logging/templates/custom-values-kibana.yaml.j2 +++ /dev/null @@ -1,227 +0,0 @@ -image: - repository: {{ kibana_oss_repo }} - tag: {{ kibana_oss_tag }} - pullPolicy: "IfNotPresent" - 
-testFramework: - image: {{ testFramework_repo }} - tag: {{ testFramework_tag }} - -commandline: - args: [] - -env: {} - # All Kibana configuration options are adjustable via env vars. - # To adjust a config option to an env var uppercase + replace `.` with `_` - # Ref: https://www.elastic.co/guide/en/kibana/current/settings.html - # - # ELASTICSEARCH_URL: http://elasticsearch-client:9200 - # SERVER_PORT: 5601 - # LOGGING_VERBOSE: "true" - # SERVER_DEFAULTROUTE: "/app/kibana" - -files: - kibana.yml: - ## Default Kibana configuration from kibana-docker. - server.name: kibana - server.host: "0" - elasticsearch.url: http://elasticsearch-logging-data.kubesphere-logging-system.svc:9200 - - ## Custom config properties below - ## Ref: https://www.elastic.co/guide/en/kibana/current/settings.html - # server.port: 5601 - # logging.verbose: "true" - # server.defaultRoute: "/app/kibana" - -deployment: - annotations: {} - -service: - type: ClusterIP - # clusterIP: None - # portName: kibana-svc - externalPort: 443 - internalPort: 5601 - # authProxyPort: 5602 To be used with authProxyEnabled and a proxy extraContainer - ## External IP addresses of service - ## Default: nil - ## - # externalIPs: - # - 192.168.0.1 - # - ## LoadBalancer IP if service.type is LoadBalancer - ## Default: nil - ## - # loadBalancerIP: 10.2.2.2 - annotations: {} - # Annotation example: setup ssl with aws cert when service.type is LoadBalancer - # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT - labels: {} - ## Label example: show service URL in `kubectl cluster-info` - # kubernetes.io/cluster-service: "true" - ## Limit load balancer source ips to list of CIDRs (where available) - # loadBalancerSourceRanges: [] - selector: {} - -ingress: - enabled: false - # hosts: - # - kibana.localhost.localdomain - # - localhost.localdomain/kibana - # annotations: - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - # tls: - # - secretName: 
chart-example-tls - # hosts: - # - chart-example.local - -serviceAccount: - # Specifies whether a service account should be created - create: false - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - # If set and create is false, the service account must be existing - name: - -livenessProbe: - enabled: false - path: /status - initialDelaySeconds: 30 - timeoutSeconds: 10 - -readinessProbe: - enabled: false - path: /status - initialDelaySeconds: 30 - timeoutSeconds: 10 - periodSeconds: 10 - successThreshold: 5 - -# Enable an authproxy. Specify container in extraContainers -authProxyEnabled: false - -extraContainers: | -# - name: proxy -# image: quay.io/gambol99/keycloak-proxy:latest -# args: -# - --resource=uri=/* -# - --discovery-url=https://discovery-url -# - --client-id=client -# - --client-secret=secret -# - --listen=0.0.0.0:5602 -# - --upstream-url=http://127.0.0.1:5601 -# ports: -# - name: web -# containerPort: 9090 - -extraVolumeMounts: [] - -extraVolumes: [] - -resources: {} - # limits: - # cpu: 100m - # memory: 300Mi - # requests: - # cpu: 100m - # memory: 300Mi - -priorityClassName: "" - -# Affinity for pod assignment -# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -# affinity: {} - -# Tolerations for pod assignment -# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -tolerations: [{key: "CriticalAddonsOnly", operator: "Exists"}, {key: "dedicated", value: "log", effect: "NoSchedule"}] - -# Node labels for pod assignment -# Ref: https://kubernetes.io/docs/user-guide/node-selection/ -nodeSelector: {} - -podAnnotations: {} -replicaCount: 1 -revisionHistoryLimit: 3 - -# Custom labels for pod assignment -podLabels: {} - -# To export a dashboard from a running Kibana 6.3.x use: -# curl --user : -XGET https://kibana.yourdomain.com:5601/api/kibana/dashboards/export?dashboard= > my-dashboard.json -# A 
dashboard is defined by a name and a string with the json payload or the download url -dashboardImport: - enabled: false - timeout: 60 - xpackauth: - enabled: false - username: myuser - password: mypass - dashboards: {} - # k8s: https://raw.githubusercontent.com/monotek/kibana-dashboards/master/k8s-fluentd-elasticsearch.json - -# List of plugins to install using initContainer -# NOTE : We notice that lower resource constraints given to the chart + plugins are likely not going to work well. -plugins: - # set to true to enable plugins installation - enabled: false - # set to true to remove all kibana plugins before installation - reset: false - # Use to add/upgrade plugin - values: - # - elastalert-kibana-plugin,1.0.1,https://github.com/bitsensor/elastalert-kibana-plugin/releases/download/1.0.1/elastalert-kibana-plugin-1.0.1-6.4.2.zip - # - logtrail,0.1.31,https://github.com/sivasamyk/logtrail/releases/download/v0.1.31/logtrail-6.6.0-0.1.31.zip - # - other_plugin - -persistentVolumeClaim: - # set to true to use pvc - enabled: false - # set to true to use you own pvc - existingClaim: false - annotations: {} - - accessModes: - - ReadWriteOnce - size: "5Gi" - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - -# default security context -securityContext: - enabled: false - allowPrivilegeEscalation: false - runAsUser: 1000 - fsGroup: 2000 - -extraConfigMapMounts: [] - # - name: logtrail-configs - # configMap: kibana-logtrail - # mountPath: /usr/share/kibana/plugins/logtrail/logtrail.json - # subPath: logtrail.json - -# Add your own init container or uncomment and modify the given example. -initContainers: {} - ## Don't start kibana till Elasticsearch is reachable. 
- ## Ensure that it is available at http://elasticsearch:9200 - ## - # es-check: # <- will be used as container name - # image: "appropriate/curl:latest" - # imagePullPolicy: "IfNotPresent" - # command: - # - "/bin/sh" - # - "-c" - # - | - # is_down=true - # while "$is_down"; do - # if curl -sSf --fail-early --connect-timeout 5 http://elasticsearch:9200; then - # is_down=false - # else - # sleep 5 - # fi - # done