diff --git a/class/defaults.yml b/class/defaults.yml index e0adaa4..f11e279 100644 --- a/class/defaults.yml +++ b/class/defaults.yml @@ -56,43 +56,14 @@ parameters: for: 6h severity: warning - clusterLogging: - managementState: Managed - logStore: - type: lokistack - lokistack: - name: loki - elasticsearch: - nodeCount: 3 - storage: - size: 200Gi - redundancyPolicy: SingleRedundancy - nodeSelector: - node-role.kubernetes.io/infra: '' - retentionPolicy: - application: - maxAge: 7d - pruneNamespacesInterval: 15m - infra: - maxAge: 30d - pruneNamespacesInterval: 15m - audit: - maxAge: 30d - pruneNamespacesInterval: 15m - visualization: - type: kibana - kibana: - replicas: 2 - nodeSelector: - node-role.kubernetes.io/infra: '' - collection: - type: vector + clusterLogging: {} clusterLogForwarding: enabled: false forwarders: {} namespace_groups: {} - application_logs: {} + application_logs: + enabled: true audit_logs: enabled: false infrastructure_logs: diff --git a/component/config_forwarding.libsonnet b/component/config_forwarding.libsonnet new file mode 100644 index 0000000..639d510 --- /dev/null +++ b/component/config_forwarding.libsonnet @@ -0,0 +1,185 @@ +local kap = import 'lib/kapitan.libjsonnet'; +local lib = import 'lib/openshift4-logging.libsonnet'; + +local inv = kap.inventory(); +local params = inv.parameters.openshift4_logging; + +local deployLokistack = params.components.lokistack.enabled; +local deployElasticsearch = params.components.elasticsearch.enabled; +local forwardingOnly = !deployLokistack && !deployElasticsearch; + +local pipelineOutputRefs(pipeline) = + local default = if forwardingOnly then [] else [ 'default' ]; + std.get(pipeline, 'forwarders', []) + default; + +// Apply default config for application logs. 
+local patchAppLogs = {
+  local outputRefs = pipelineOutputRefs(params.clusterLogForwarding.application_logs),
+  local enablePipeline = params.clusterLogForwarding.application_logs.enabled && std.length(outputRefs) > 0,
+  local enableMultilineErrors = std.get(params.clusterLogForwarding.application_logs, 'detectMultilineErrors', false),
+
+  pipelines: {
+    [if enablePipeline then 'application-logs']: {
+      inputRefs: [ 'application' ],
+      outputRefs: outputRefs,
+      // [if enableMultilineErrors then 'detectMultilineErrors']: true,
+    },
+  },
+};
+
+// Apply default config for infra logs.
+local patchInfraLogs = {
+  local outputRefs = pipelineOutputRefs(params.clusterLogForwarding.infrastructure_logs),
+  local enablePipeline = params.clusterLogForwarding.infrastructure_logs.enabled && std.length(outputRefs) > 0,
+  local enableMultilineErrors = std.get(params.clusterLogForwarding.infrastructure_logs, 'detectMultilineErrors', false),
+
+  pipelines: {
+    [if enablePipeline then 'infrastructure-logs']: {
+      inputRefs: [ 'infrastructure' ],
+      outputRefs: outputRefs,
+    },
+  },
+};
+
+// Apply default config for audit logs.
+local patchAuditLogs = {
+  local outputRefs = pipelineOutputRefs(params.clusterLogForwarding.audit_logs),
+  local enablePipeline = params.clusterLogForwarding.audit_logs.enabled && std.length(outputRefs) > 0,
+
+  pipelines: {
+    [if enablePipeline then 'audit-logs']: {
+      inputRefs: [ 'audit' ],
+      outputRefs: outputRefs,
+    },
+  },
+};
+
+// Enable json parsing for default pipelines if configured.
+local patchJsonLogging = {
+  local enableAppLogs = std.get(params.clusterLogForwarding.application_logs, 'json', false),
+  local enableInfraLogs = std.get(params.clusterLogForwarding.infrastructure_logs, 'json', false),
+
+  pipelines: {
+    [if enableAppLogs then 'application-logs']: { parse: 'json' },
+    [if enableInfraLogs then 'infrastructure-logs']: { parse: 'json' },
+  },
+  [if deployElasticsearch && params.clusterLogForwarding.json.enabled then 'outputDefaults']: {
+    elasticsearch: {
+      structuredTypeKey: params.clusterLogForwarding.json.typekey,
+      structuredTypeName: params.clusterLogForwarding.json.typename,
+    },
+  },
+};
+
+// Enable detectMultilineErrors for default pipelines if configured.
+local patchMultilineErrors = {
+  local enableAppLogs = std.get(params.clusterLogForwarding.application_logs, 'detectMultilineErrors', false),
+  local enableInfraLogs = std.get(params.clusterLogForwarding.infrastructure_logs, 'detectMultilineErrors', false),
+
+  pipelines: {
+    [if enableAppLogs then 'application-logs']: { detectMultilineErrors: true },
+    [if enableInfraLogs then 'infrastructure-logs']: { detectMultilineErrors: true },
+  },
+};
+
+// --- legacy clusterLogForwarding.namespace config
+local namespaceGroups = (
+  if std.objectHas(params.clusterLogForwarding, 'namespaces') then
+    {
+      [ns]: {
+        namespaces: [ ns ],
+        forwarders: [ params.clusterLogForwarding.namespaces[ns].forwarder ],
+      }
+      for ns in std.objectFields(params.clusterLogForwarding.namespaces)
+    } else {}
+) + params.clusterLogForwarding.namespace_groups;
+// --- end legacy patch
+
+// Add inputs entry for every namespace_group defined in params.clusterLogForwarding.namespace_groups
+local patchCustomInputs = {
+  [if std.length(namespaceGroups) > 0 then 'inputs']: {
+    [group]: {
+      application: {
+        namespaces: namespaceGroups[group].namespaces,
+      },
+    }
+    for group in std.objectFields(namespaceGroups)
+  },
+};
+
+// Add pipelines entry for every namespace_group defined in
params.clusterLogForwarding.namespace_groups
+local patchCustomPipelines = {
+  [if std.length(namespaceGroups) > 0 then 'pipelines']: {
+    local enableJson = std.get(namespaceGroups[group], 'json', false),
+    local enableMultilineError = std.get(namespaceGroups[group], 'detectMultilineErrors', false),
+
+    [group]: {
+      inputRefs: [ group ],
+      outputRefs: std.get(namespaceGroups[group], 'forwarders', []),
+      [if enableJson then 'parse']: 'json',
+      [if enableMultilineError then 'detectMultilineErrors']: true,
+    }
+    for group in std.objectFields(namespaceGroups)
+  },
+};
+
+// Add outputs entry for every forwarder defined in params.clusterLogForwarding.forwarders.
+local patchCustomOutputs = {
+  [if std.length(params.clusterLogForwarding.forwarders) > 0 then 'outputs']: {
+    [name]: params.clusterLogForwarding.forwarders[name]
+    for name in std.objectFields(params.clusterLogForwarding.forwarders)
+  },
+};
+
+local logForwarderSpec = std.foldl(
+  function(manifest, patch) std.mergePatch(manifest, patch),
+  [
+    patchAppLogs,
+    patchInfraLogs,
+    patchAuditLogs,
+    patchJsonLogging,
+    patchMultilineErrors,
+    patchCustomInputs,
+    patchCustomOutputs,
+    patchCustomPipelines,
+  ],
+  {
+    inputs: {},
+    outputs: {},
+    pipelines: {},
+  }
+);
+
+//
+local clusterLogForwarder = lib.ClusterLogForwarder(params.namespace, 'instance') {
+  spec: {
+    [key]: logForwarderSpec[key]
+    for key in std.objectFields(logForwarderSpec)
+    if !std.member([ 'inputs', 'outputs', 'pipelines' ], key)
+  } + {
+    // Unfold xxxObj into array
+    [if std.length(logForwarderSpec.inputs) > 0 then 'inputs']: [
+      { name: name } + logForwarderSpec.inputs[name]
+      for name in std.objectFields(logForwarderSpec.inputs)
+    ],
+    [if std.length(logForwarderSpec.outputs) > 0 then 'outputs']: [
+      { name: name } + logForwarderSpec.outputs[name]
+      for name in std.objectFields(logForwarderSpec.outputs)
+    ],
+    [if std.length(logForwarderSpec.pipelines) > 0 then 'pipelines']: [
+      { name: name } + logForwarderSpec.pipelines[name]
+      for
name in std.objectFields(logForwarderSpec.pipelines) + ], + }, +}; + +// Define outputs below +if params.clusterLogForwarding.enabled then + { + '31_cluster_logforwarding': clusterLogForwarder, + } +else + std.trace( + 'Log forwarding disabled, not deploying ClusterLogForwarder', + {} + ) diff --git a/component/config_logging.libsonnet b/component/config_logging.libsonnet new file mode 100644 index 0000000..aba9064 --- /dev/null +++ b/component/config_logging.libsonnet @@ -0,0 +1,119 @@ +local kap = import 'lib/kapitan.libjsonnet'; +local lib = import 'lib/openshift4-logging.libsonnet'; + +local inv = kap.inventory(); +local params = inv.parameters.openshift4_logging; + +local deployLokistack = params.components.lokistack.enabled; +local deployElasticsearch = params.components.elasticsearch.enabled; + +// Apply defaults for Lokistack. +local patchLokistackDefaults = { + [if deployLokistack then 'spec']: { + logStore: { + type: 'lokistack', + lokistack: { + name: 'loki', + }, + }, + }, +}; + +// Apply defaults for Elasticsearch. +local patchElasticsearchDefaults = { + [if deployElasticsearch then 'spec']: { + logStore: { + elasticsearch: { + nodeCount: 3, + storage: { + size: '200Gi', + }, + redundancyPolicy: 'SingleRedundancy', + nodeSelector: { + 'node-role.kubernetes.io/infra': '', + }, + }, + retentionPolicy: { + application: { + maxAge: '7d', + pruneNamespacesInterval: '15m', + }, + infra: { + maxAge: '30d', + pruneNamespacesInterval: '15m', + }, + audit: { + maxAge: '30d', + pruneNamespacesInterval: '15m', + }, + }, + }, + visualization: { + type: 'kibana', + kibana: { + replicas: 2, + nodeSelector: { + 'node-role.kubernetes.io/infra': '', + }, + }, + }, + }, +}; + +// Apply customisations from params.clusterLogging. +local patchLoggingConfig = { + spec: params.clusterLogging { + collection: { + // Don't include legacy config key 'collection.logs'. 
+ [it]: params.clusterLogging.collection[it] + for it in std.objectFields(std.get(params.clusterLogging, 'collection', {})) + if it != 'logs' + }, + }, +}; + +// --- patch deprecated logging resource +local patchLegacyConfig = { + local legacyConfig = std.get(std.get(params.clusterLogging, 'collection', { collection: {} }), 'logs', {}), + local legacyType = std.get(legacyConfig, 'type', ''), + local legacyFluentd = std.get(legacyConfig, 'fluentd', {}), + + spec: { + collection: if std.length(legacyConfig) > 0 then std.trace( + 'Parameter `clusterLogging.collector.logs` is deprecated. Please update your config to use `clusterLogging.collector`', + { + [if legacyType != '' then 'type']: legacyType, + } + legacyFluentd, + ) else {}, + }, +}; +// --- patch end + +local clusterLogging = std.foldl( + function(manifest, patch) std.mergePatch(manifest, patch), + [ + patchLokistackDefaults, + patchElasticsearchDefaults, + patchLoggingConfig, + patchLegacyConfig, + ], + lib.ClusterLogging(params.namespace, 'instance') { + metadata+: { + annotations+: { + 'argocd.argoproj.io/sync-options': 'SkipDryRunOnMissingResource=true', + }, + }, + spec: { + managementState: 'Managed', + logStore: {}, + collection: { + type: 'vector', + }, + }, + } +); + +// Define outputs below +{ + '30_cluster_logging': clusterLogging, +} diff --git a/component/main.jsonnet b/component/main.jsonnet index 8d95afd..1ad6ae8 100644 --- a/component/main.jsonnet +++ b/component/main.jsonnet @@ -1,228 +1,95 @@ local com = import 'lib/commodore.libjsonnet'; local kap = import 'lib/kapitan.libjsonnet'; local kube = import 'lib/kube.libjsonnet'; -local operatorlib = import 'lib/openshift4-operators.libsonnet'; +local operator = import 'lib/openshift4-operators.libsonnet'; + +local config = import 'config.libsonnet'; local inv = kap.inventory(); local params = inv.parameters.openshift4_logging; -local group = 'operators.coreos.com/'; -local clusterLoggingGroupVersion = 'logging.openshift.io/v1'; +local 
deployLokistack = params.components.lokistack.enabled; +local deployElasticsearch = params.components.elasticsearch.enabled; -local forwardingOnly = !params.components.elasticsearch.enabled && !params.components.lokistack.enabled; +// Namespace -local namespace_groups = ( - if std.objectHas(params.clusterLogForwarding, 'namespaces') then - { - [ns]: { - namespaces: [ ns ], - forwarders: [ params.clusterLogForwarding.namespaces[ns].forwarder ], - } - for ns in std.objectFields(params.clusterLogForwarding.namespaces) - } - else - {} -) + params.clusterLogForwarding.namespace_groups; +local namespace = kube.Namespace(params.namespace) { + metadata+: { + annotations+: { + 'openshift.io/node-selector': '', + }, + labels+: { + 'openshift.io/cluster-monitoring': 'true', + }, + }, +}; -// --- Patch deprecated logging resource -local legacyCollectionConfig = std.get(params.clusterLogging.collection, 'logs', {}); -local legacyCollectionPatch = if std.length(legacyCollectionConfig) > 0 then std.trace( - 'Parameter `clusterLogging.collector.logs` is deprecated. 
Please update your config to use `clusterLogging.collector`', - { - local type = std.get(legacyCollectionConfig, 'type', ''), - local fluentd = std.get(legacyCollectionConfig, 'fluentd', {}), - collection+: { - [if type != '' then 'type']: type, - } + if std.length(fluentd) > 0 then fluentd, - } -) else {}; +// OperatorGroup -local clusterLogging = std.mergePatch( - params.clusterLogging { - collection: { - [it]: params.clusterLogging.collection[it] - for it in std.objectFields(params.clusterLogging.collection) - if it != 'logs' - }, - } + legacyCollectionPatch, - { - // Patch to remove certain keys, as the ClusterLogging operator would just - // deploy elasticsearch or kibana if they are configured - [if forwardingOnly then 'logStore']: null, - } -); -// --- End patch +local operatorGroup = operator.OperatorGroup('cluster-logging') { + metadata+: { + namespace: params.namespace, + }, + spec: { + targetNamespaces: [ + params.namespace, + ], + }, +}; -local pipelineOutputRefs(pipeline) = - local default = if forwardingOnly then [] else [ 'default' ]; - std.get(pipeline, 'forwarders', []) + default; +// Subscriptions -{ - '00_namespace': kube.Namespace(params.namespace) { - metadata+: { - annotations+: { - 'openshift.io/node-selector': '', - }, - labels+: { - 'openshift.io/cluster-monitoring': 'true', - }, +local logging = operator.namespacedSubscription( + params.namespace, + 'cluster-logging', + params.channel, + 'redhat-operators' +) { + spec+: { + config+: { + resources: params.operatorResources.clusterLogging, }, }, - '10_operator_group': operatorlib.OperatorGroup('cluster-logging') { - metadata+: { - namespace: params.namespace, - }, - spec: { - targetNamespaces: [ - params.namespace, - ], +}; + +local lokistack = if deployLokistack then operator.managedSubscription( + 'openshift-operators-redhat', + 'loki-operator', + params.channel +) { + spec+: { + config+: { + resources: params.operatorResources.lokistack, }, }, - '20_subscriptions': [ - 
operatorlib.namespacedSubscription( - params.namespace, - 'cluster-logging', - params.channel, - 'redhat-operators' - ) { - spec+: { - config+: { - resources: params.operatorResources.clusterLogging, - }, - }, - }, - ] + ( - if params.components.lokistack.enabled then [ - operatorlib.managedSubscription( - 'openshift-operators-redhat', - 'loki-operator', - params.channel - ) { - spec+: { - config+: { - resources: params.operatorResources.lokistack, - }, - }, - }, - ] else [] - ) + ( - if params.components.elasticsearch.enabled then [ - operatorlib.managedSubscription( - 'openshift-operators-redhat', - 'elasticsearch-operator', - params.channel - ) { - spec+: { - config+: { - resources: params.operatorResources.elasticsearch, - }, - }, - }, - ] else [] - ), - '30_cluster_logging': std.mergePatch( - // ClusterLogging resource from inventory - kube._Object(clusterLoggingGroupVersion, 'ClusterLogging', 'instance') { - metadata+: { - namespace: params.namespace, - annotations+: { - 'argocd.argoproj.io/sync-options': 'SkipDryRunOnMissingResource=true', - }, - }, - spec: clusterLogging, - }, { - // Patch to remove certain keys, as the ClusterLogging operator would just - // deploy elasticsearch or kibana if they are configured - spec: { - logStore: { - [if !params.components.elasticsearch.enabled then 'elasticsearch']: null, - [if !params.components.lokistack.enabled then 'lokistack']: null, - }, - [if !params.components.elasticsearch.enabled then 'visualization']: null, - }, - } - ), - [if params.clusterLogForwarding.enabled then '31_cluster_logforwarding']: kube._Object(clusterLoggingGroupVersion, 'ClusterLogForwarder', 'instance') { - metadata+: { - namespace: params.namespace, +}; + +local elasticsearch = if deployElasticsearch then operator.managedSubscription( + 'openshift-operators-redhat', + 'elasticsearch-operator', + params.channel +) { + spec+: { + config+: { + resources: params.operatorResources.elasticsearch, }, - spec: { - [if 
params.clusterLogForwarding.json.enabled then 'outputDefaults']: { - elasticsearch: { - structuredTypeKey: params.clusterLogForwarding.json.typekey, - structuredTypeName: params.clusterLogForwarding.json.typename, - }, - }, - [if std.length(params.clusterLogForwarding.forwarders) > 0 then 'outputs']: [ - params.clusterLogForwarding.forwarders[fw] { name: fw } - for fw in std.objectFields(params.clusterLogForwarding.forwarders) - ], - [if std.length(namespace_groups) > 0 then 'inputs']: [ - { - name: group, - application: { - namespaces: namespace_groups[group].namespaces, - }, - } - for group in std.objectFields(namespace_groups) - ], - [if std.length(namespace_groups) > 0 then 'pipelines']: [ - local enable_json = com.getValueOrDefault(namespace_groups[group], 'json', false); - local enable_multilineErrors = com.getValueOrDefault(namespace_groups[group], 'detectMultilineErrors', false); - local patch_json = { outputRefs: [ 'default' ], parse: 'json' }; - { - name: group, - inputRefs: [ group ], - outputRefs: com.getValueOrDefault(namespace_groups[group], 'forwarders', []), - } + com.makeMergeable(if enable_json then patch_json else {}) - + com.makeMergeable(if enable_multilineErrors then { detectMultilineErrors: true } else {}) - for group in std.objectFields(namespace_groups) - ], - } + com.makeMergeable( - local enable_pipeline = std.length(pipelineOutputRefs(params.clusterLogForwarding.application_logs)) > 0; - local enable_json = com.getValueOrDefault(params.clusterLogForwarding.application_logs, 'json', false); - local enable_multilineErrors = com.getValueOrDefault(params.clusterLogForwarding.application_logs, 'detectMultilineErrors', false); - { - [if enable_pipeline then 'pipelines']: [ - { - name: 'application-logs', - inputRefs: [ 'application' ], - outputRefs: pipelineOutputRefs(params.clusterLogForwarding.application_logs), - [if enable_json then 'parse']: 'json', - [if enable_multilineErrors then 'detectMultilineErrors']: true, - }, - ], - } - ) + 
com.makeMergeable( - local enable_pipeline = params.clusterLogForwarding.infrastructure_logs.enabled && std.length(pipelineOutputRefs(params.clusterLogForwarding.infrastructure_logs)) > 0; - local enable_json = com.getValueOrDefault(params.clusterLogForwarding.infrastructure_logs, 'json', false); - local enable_multilineErrors = com.getValueOrDefault(params.clusterLogForwarding.infrastructure_logs, 'detectMultilineErrors', false); - { - [if enable_pipeline then 'pipelines']: [ - { - name: 'infrastructure-logs', - inputRefs: [ 'infrastructure' ], - outputRefs: pipelineOutputRefs(params.clusterLogForwarding.infrastructure_logs), - [if enable_json then 'parse']: 'json', - [if enable_multilineErrors then 'detectMultilineErrors']: true, - }, - ], - } - ) + com.makeMergeable( - local enable_pipeline = params.clusterLogForwarding.audit_logs.enabled && std.length(pipelineOutputRefs(params.clusterLogForwarding.application_logs)) > 0; - local enable_json = com.getValueOrDefault(params.clusterLogForwarding.audit_logs, 'json', false); - { - [if params.clusterLogForwarding.audit_logs.enabled then 'pipelines']: [ - { - name: 'audit-logs', - inputRefs: [ 'audit' ], - outputRefs: pipelineOutputRefs(params.clusterLogForwarding.audit_logs), - }, - ], - } - ), }, +}; + +local subscriptions = std.filter(function(it) it != null, [ + logging, + lokistack, + elasticsearch, +]); + +// Define outputs below +{ + '00_namespace': namespace, + '10_operator_group': operatorGroup, + '20_subscriptions': subscriptions, } ++ (import 'config_logging.libsonnet') ++ (import 'config_forwarding.libsonnet') + (import 'loki.libsonnet') + (import 'elasticsearch.libsonnet') + (import 'alertrules.libsonnet') diff --git a/lib/openshift4-logging.libsonnet b/lib/openshift4-logging.libsonnet new file mode 100644 index 0000000..dd2a061 --- /dev/null +++ b/lib/openshift4-logging.libsonnet @@ -0,0 +1,18 @@ +local kube = import 'lib/kube.libjsonnet'; + +local ClusterLogging(namespace, name) = 
kube._Object('logging.openshift.io/v1', 'ClusterLogging', name) { + metadata+: { + namespace: namespace, + }, +}; + +local ClusterLogForwarder(namespace, name) = kube._Object('logging.openshift.io/v1', 'ClusterLogForwarder', name) { + metadata+: { + namespace: namespace, + }, +}; + +{ + ClusterLogging: ClusterLogging, + ClusterLogForwarder: ClusterLogForwarder, +} diff --git a/tests/elasticsearch.yml b/tests/elasticsearch.yml index 7310ac9..49834f6 100644 --- a/tests/elasticsearch.yml +++ b/tests/elasticsearch.yml @@ -41,3 +41,18 @@ parameters: type: fluentd logStore: type: elasticsearch + clusterLogForwarding: + enabled: true + forwarders: + custom-forwarder: + type: syslog + namespace_groups: + my-apps: + namespaces: + - app-one + - app-two + forwarders: + - custom-forwarder + json: true + # application_logs: + # enabled: false diff --git a/tests/golden/defaults/openshift4-logging/openshift4-logging/30_cluster_logging.yaml b/tests/golden/defaults/openshift4-logging/openshift4-logging/30_cluster_logging.yaml index ec6cac6..307f0ca 100644 --- a/tests/golden/defaults/openshift4-logging/openshift4-logging/30_cluster_logging.yaml +++ b/tests/golden/defaults/openshift4-logging/openshift4-logging/30_cluster_logging.yaml @@ -13,15 +13,5 @@ spec: logStore: lokistack: name: loki - retentionPolicy: - application: - maxAge: 7d - pruneNamespacesInterval: 15m - audit: - maxAge: 30d - pruneNamespacesInterval: 15m - infra: - maxAge: 30d - pruneNamespacesInterval: 15m type: lokistack managementState: Managed diff --git a/tests/golden/elasticsearch/openshift4-logging/openshift4-logging/31_cluster_logforwarding.yaml b/tests/golden/elasticsearch/openshift4-logging/openshift4-logging/31_cluster_logforwarding.yaml new file mode 100644 index 0000000..c51f960 --- /dev/null +++ b/tests/golden/elasticsearch/openshift4-logging/openshift4-logging/31_cluster_logforwarding.yaml @@ -0,0 +1,35 @@ +apiVersion: logging.openshift.io/v1 +kind: ClusterLogForwarder +metadata: + annotations: {} + 
labels: + name: instance + name: instance + namespace: openshift-logging +spec: + inputs: + - application: + namespaces: + - app-one + - app-two + name: my-apps + outputs: + - name: custom-forwarder + type: syslog + pipelines: + - inputRefs: + - application + name: application-logs + outputRefs: + - default + - inputRefs: + - infrastructure + name: infrastructure-logs + outputRefs: + - default + - inputRefs: + - my-apps + name: my-apps + outputRefs: + - custom-forwarder + parse: json diff --git a/tests/golden/master/openshift4-logging/openshift4-logging/30_cluster_logging.yaml b/tests/golden/master/openshift4-logging/openshift4-logging/30_cluster_logging.yaml index ec6cac6..307f0ca 100644 --- a/tests/golden/master/openshift4-logging/openshift4-logging/30_cluster_logging.yaml +++ b/tests/golden/master/openshift4-logging/openshift4-logging/30_cluster_logging.yaml @@ -13,15 +13,5 @@ spec: logStore: lokistack: name: loki - retentionPolicy: - application: - maxAge: 7d - pruneNamespacesInterval: 15m - audit: - maxAge: 30d - pruneNamespacesInterval: 15m - infra: - maxAge: 30d - pruneNamespacesInterval: 15m type: lokistack managementState: Managed diff --git a/tests/golden/multilineerr/openshift4-logging/openshift4-logging/30_cluster_logging.yaml b/tests/golden/multilineerr/openshift4-logging/openshift4-logging/30_cluster_logging.yaml index ec6cac6..307f0ca 100644 --- a/tests/golden/multilineerr/openshift4-logging/openshift4-logging/30_cluster_logging.yaml +++ b/tests/golden/multilineerr/openshift4-logging/openshift4-logging/30_cluster_logging.yaml @@ -13,15 +13,5 @@ spec: logStore: lokistack: name: loki - retentionPolicy: - application: - maxAge: 7d - pruneNamespacesInterval: 15m - audit: - maxAge: 30d - pruneNamespacesInterval: 15m - infra: - maxAge: 30d - pruneNamespacesInterval: 15m type: lokistack managementState: Managed