diff --git a/.reuse/dep5 b/.reuse/dep5
index 2f3938a72..4bc07c4d3 100644
--- a/.reuse/dep5
+++ b/.reuse/dep5
@@ -563,6 +563,10 @@ Files: crd-catalog/NetApp/trident/*
Copyright: The NetApp/trident Authors
License: Apache-2.0
+Files: crd-catalog/netobserv/network-observability-operator/*
+Copyright: The netobserv/network-observability-operator Authors
+License: Apache-2.0
+
Files: crd-catalog/nginxinc/kubernetes-ingress/*
Copyright: The nginxinc/kubernetes-ingress Authors
License: Apache-2.0
diff --git a/code-generator/src/catalog.rs b/code-generator/src/catalog.rs
index f3e5ef561..8c5356cbe 100644
--- a/code-generator/src/catalog.rs
+++ b/code-generator/src/catalog.rs
@@ -1471,6 +1471,13 @@ pub const CRD_V1_SOURCES: &'static [UpstreamSource] = &[
"https://github.com/NetApp/trident/blob/master/deploy/crds/trident.netapp.io_tridentorchestrators_crd.yaml",
],
},
+ UpstreamSource {
+ project_name: "netobserv/network-observability-operator",
+ license: APACHE_V2,
+ urls: &[
+ "https://github.com/netobserv/network-observability-operator/blob/main/config/crd/bases/flows.netobserv.io_flowcollectors.yaml",
+ ],
+ },
UpstreamSource {
project_name: "nginxinc/kubernetes-ingress",
license: APACHE_V2,
diff --git a/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1alpha1/flowcollectors.yaml b/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1alpha1/flowcollectors.yaml
new file mode 100644
index 000000000..0d1f01b4e
--- /dev/null
+++ b/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1alpha1/flowcollectors.yaml
@@ -0,0 +1,1711 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.6.1
+ name: flowcollectors.flows.netobserv.io
+spec:
+ group: flows.netobserv.io
+ names:
+ kind: FlowCollector
+ listKind: FlowCollectorList
+ plural: flowcollectors
+ singular: flowcollector
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.agent.type
+ name: Agent
+ type: string
+ - jsonPath: .spec.agent.ebpf.sampling
+ name: Sampling (EBPF)
+ type: string
+ - jsonPath: .spec.deploymentModel
+ name: Deployment Model
+ type: string
+ - jsonPath: .status.conditions[*].reason
+ name: Status
+ type: string
+ deprecated: true
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: "FlowCollector is the Schema for the flowcollectors API, which pilots and configures netflow collection. \n Deprecated: This package will be removed in one of the next releases."
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: FlowCollectorSpec defines the desired state of FlowCollector
+ properties:
+ agent:
+ default:
+ type: EBPF
+ description: agent for flows extraction.
+ properties:
+ ebpf:
+ description: ebpf describes the settings related to the eBPF-based flow reporter when the "agent.type" property is set to "EBPF".
+ properties:
+ cacheActiveTimeout:
+ default: 5s
+ description: cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.
+ pattern: ^\d+(ns|ms|s|m)?$
+ type: string
+ cacheMaxFlows:
+ default: 100000
+ description: cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.
+ format: int32
+ minimum: 1.0
+ type: integer
+ debug:
+ description: Debug allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk.
+ properties:
+ env:
+ additionalProperties:
+ type: string
+ description: env allows passing custom environment variables to the NetObserv Agent. Useful for passing some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug and support scenarios.
+ type: object
+ type: object
+ excludeInterfaces:
+ default:
+ - lo
+ description: excludeInterfaces contains the interface names that will be excluded from flow tracing. If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, otherwise it will be matched as a case-sensitive string.
+ items:
+ type: string
+ type: array
+ imagePullPolicy:
+ default: IfNotPresent
+ description: imagePullPolicy is the Kubernetes pull policy for the image defined above
+ enum:
+ - IfNotPresent
+ - Always
+ - Never
+ type: string
+ interfaces:
+ description: interfaces contains the interface names from where flows will be collected. If empty, the agent will fetch all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, otherwise it will be matched as a case-sensitive string.
+ items:
+ type: string
+ type: array
+ kafkaBatchSize:
+ default: 10485760
+ description: 'kafkaBatchSize limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB.'
+ type: integer
+ logLevel:
+ default: info
+ description: logLevel defines the log level for the NetObserv eBPF Agent
+ enum:
+ - trace
+ - debug
+ - info
+ - warn
+ - error
+ - fatal
+ - panic
+ type: string
+ privileged:
+ description: 'privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: in that case, the operator will set granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable its correct operation. If for some reason these capabilities cannot be set (for example old kernel version not knowing CAP_BPF) then you can turn on this mode for more global privileges.'
+ type: boolean
+ resources:
+ default:
+ limits:
+ memory: 800Mi
+ requests:
+ cpu: 100m
+ memory: 50Mi
+ description: 'resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ properties:
+ claims:
+ description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ sampling:
+ default: 50
+ description: sampling rate of the flow reporter. 100 means one flow on 100 is sent. 0 or 1 means all flows are sampled.
+ format: int32
+ minimum: 0.0
+ type: integer
+ type: object
+ ipfix:
+ description: ipfix describes the settings related to the IPFIX-based flow reporter when the "agent.type" property is set to "IPFIX".
+ properties:
+ cacheActiveTimeout:
+ default: 20s
+ description: cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending
+ pattern: ^\d+(ns|ms|s|m)?$
+ type: string
+ cacheMaxFlows:
+ default: 400
+ description: cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows
+ format: int32
+ minimum: 0.0
+ type: integer
+ clusterNetworkOperator:
+ description: clusterNetworkOperator defines the settings related to the OpenShift Cluster Network Operator, when available.
+ properties:
+ namespace:
+ default: openshift-network-operator
+ description: namespace where the config map is going to be deployed.
+ type: string
+ type: object
+ forceSampleAll:
+ default: false
+ description: forceSampleAll allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to true. Use at your own risk. When it is set to true, the value of "sampling" is ignored.
+ type: boolean
+ ovnKubernetes:
+ description: ovnKubernetes defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
+ properties:
+ containerName:
+ default: ovnkube-node
+ description: containerName defines the name of the container to configure for IPFIX.
+ type: string
+ daemonSetName:
+ default: ovnkube-node
+ description: daemonSetName defines the name of the DaemonSet controlling the OVN-Kubernetes pods.
+ type: string
+ namespace:
+ default: ovn-kubernetes
+ description: namespace where OVN-Kubernetes pods are deployed.
+ type: string
+ type: object
+ sampling:
+ default: 400
+ description: sampling is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to "forceSampleAll". Alternatively, you can use the eBPF Agent instead of IPFIX.
+ format: int32
+ minimum: 2.0
+ type: integer
+ type: object
+ type:
+ default: EBPF
+ description: type selects the flows tracing agent. Possible values are "EBPF" (default) to use NetObserv eBPF agent, "IPFIX" to use the legacy IPFIX collector. "EBPF" is recommended in most cases as it offers better performances and should work regardless of the CNI installed on the cluster. "IPFIX" works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).
+ enum:
+ - EBPF
+ - IPFIX
+ type: string
+ required:
+ - type
+ type: object
+ consolePlugin:
+ description: consolePlugin defines the settings related to the OpenShift Console plugin, when available.
+ properties:
+ autoscaler:
+ description: autoscaler spec of a horizontal pod autoscaler to set up for the plugin Deployment.
+ properties:
+ maxReplicas:
+ default: 3
+ description: maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+ format: int32
+ type: integer
+ metrics:
+ description: metrics used by the pod autoscaler
+ items:
+ description: MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).
+ properties:
+ containerResource:
+ description: containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
+ properties:
+ container:
+ description: container is the name of the container in the pods of the scaling target
+ type: string
+ name:
+ description: name is the name of the resource in question.
+ type: string
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - container
+ - name
+ - target
+ type: object
+ external:
+ description: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
+ properties:
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - metric
+ - target
+ type: object
+ object:
+ description: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
+ properties:
+ describedObject:
+ description: describedObject specifies the descriptions of a object,such as kind,name apiVersion
+ properties:
+ apiVersion:
+ description: apiVersion is the API version of the referent
+ type: string
+ kind:
+ description: 'kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - describedObject
+ - metric
+ - target
+ type: object
+ pods:
+ description: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
+ properties:
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - metric
+ - target
+ type: object
+ resource:
+ description: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+ properties:
+ name:
+ description: name is the name of the resource in question.
+ type: string
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - name
+ - target
+ type: object
+ type:
+ description: 'type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled'
+ type: string
+ required:
+ - type
+ type: object
+ type: array
+ minReplicas:
+ description: minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.
+ format: int32
+ type: integer
+ status:
+ default: DISABLED
+ description: Status describe the desired status regarding deploying an horizontal pod autoscaler DISABLED will not deploy an horizontal pod autoscaler ENABLED will deploy an horizontal pod autoscaler
+ enum:
+ - DISABLED
+ - ENABLED
+ type: string
+ type: object
+ imagePullPolicy:
+ default: IfNotPresent
+ description: imagePullPolicy is the Kubernetes pull policy for the image defined above
+ enum:
+ - IfNotPresent
+ - Always
+ - Never
+ type: string
+ logLevel:
+ default: info
+ description: logLevel for the console plugin backend
+ enum:
+ - trace
+ - debug
+ - info
+ - warn
+ - error
+ - fatal
+ - panic
+ type: string
+ port:
+ default: 9001
+ description: port is the plugin service port
+ format: int32
+ maximum: 65535.0
+ minimum: 1.0
+ type: integer
+ portNaming:
+ default:
+ enable: true
+ description: portNaming defines the configuration of the port-to-service name translation
+ properties:
+ enable:
+ default: true
+ description: enable the console plugin port-to-service name translation
+ type: boolean
+ portNames:
+ additionalProperties:
+ type: string
+ description: 'portNames defines additional port names to use in the console. Example: portNames: {"3100": "loki"}'
+ type: object
+ type: object
+ quickFilters:
+ default:
+ - default: true
+ filter:
+ dst_namespace!: openshift-,netobserv
+ src_namespace!: openshift-,netobserv
+ name: Applications
+ - filter:
+ dst_namespace: openshift-,netobserv
+ src_namespace: openshift-,netobserv
+ name: Infrastructure
+ - default: true
+ filter:
+ dst_kind: Pod
+ src_kind: Pod
+ name: Pods network
+ - filter:
+ dst_kind: Service
+ name: Services network
+ description: quickFilters configures quick filter presets for the Console plugin
+ items:
+ description: QuickFilter defines preset configuration for Console's quick filters
+ properties:
+ default:
+ description: default defines whether this filter should be active by default or not
+ type: boolean
+ filter:
+ additionalProperties:
+ type: string
+ description: 'filter is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a coma-separated string. Example: filter: {"src_namespace": "namespace1,namespace2"}'
+ type: object
+ name:
+ description: name of the filter, that will be displayed in Console
+ type: string
+ required:
+ - filter
+ - name
+ type: object
+ type: array
+ register:
+ default: true
+ description: 'register allows, when set to true, to automatically register the provided console plugin with the OpenShift Console operator. When set to false, you can still register it manually by editing console.operator.openshift.io/cluster. E.g: oc patch console.operator.openshift.io cluster --type=''json'' -p ''[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]'''
+ type: boolean
+ replicas:
+ default: 1
+ description: replicas defines the number of replicas (pods) to start.
+ format: int32
+ minimum: 0.0
+ type: integer
+ resources:
+ default:
+ limits:
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ memory: 50Mi
+ description: 'resources, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ properties:
+ claims:
+ description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ required:
+ - register
+ type: object
+ deploymentModel:
+ default: DIRECT
+ description: deploymentModel defines the desired type of deployment for flow processing. Possible values are "DIRECT" (default) to make the flow processor listening directly from the agents, or "KAFKA" to make flows sent to a Kafka pipeline before consumption by the processor. Kafka can provide better scalability, resiliency and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).
+ enum:
+ - DIRECT
+ - KAFKA
+ type: string
+ exporters:
+                     description: exporters defines additional optional exporters for custom consumption or storage. This is an experimental feature. Currently, KAFKA and IPFIX exporters are available.
+ items:
+ description: FlowCollectorExporter defines an additional exporter to send enriched flows to
+ properties:
+ ipfix:
+ description: IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to.
+ properties:
+ targetHost:
+ default: ''
+ description: Address of the IPFIX external receiver
+ type: string
+ targetPort:
+ description: Port for the IPFIX external receiver
+ type: integer
+ transport:
+ description: Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`.
+ enum:
+ - TCP
+ - UDP
+ type: string
+ required:
+ - targetHost
+ - targetPort
+ type: object
+ kafka:
+ description: kafka configuration, such as address or topic, to send enriched flows to.
+ properties:
+ address:
+ default: ''
+ description: address of the Kafka server
+ type: string
+ sasl:
+ description: SASL authentication configuration. [Unsupported (*)].
+ properties:
+ clientIDReference:
+ description: Reference to the secret or config map containing the client ID
+ properties:
+ file:
+ description: File name within the config map or secret
+ type: string
+ name:
+ description: Name of the config map or secret containing the file
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the file reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ clientSecretReference:
+ description: Reference to the secret or config map containing the client secret
+ properties:
+ file:
+ description: File name within the config map or secret
+ type: string
+ name:
+ description: Name of the config map or secret containing the file
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the file reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type:
+ default: DISABLED
+ description: Type of SASL authentication to use, or `DISABLED` if SASL is not used
+ enum:
+ - DISABLED
+ - PLAIN
+ - SCRAM-SHA512
+ type: string
+ type: object
+ tls:
+ description: tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged).
+ properties:
+ caCert:
+ description: caCert defines the reference of the certificate for the Certificate Authority
+ properties:
+ certFile:
+ description: certFile defines the path to the certificate file name within the config map or secret
+ type: string
+ certKey:
+ description: certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+ type: string
+ name:
+ description: name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'type for the certificate reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ enable:
+ default: false
+ description: enable TLS
+ type: boolean
+ insecureSkipVerify:
+ default: false
+                             description: insecureSkipVerify allows skipping client-side verification of the server certificate. If set to true, the CACert field will be ignored
+ type: boolean
+ userCert:
+ description: userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS)
+ properties:
+ certFile:
+ description: certFile defines the path to the certificate file name within the config map or secret
+ type: string
+ certKey:
+ description: certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+ type: string
+ name:
+ description: name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'type for the certificate reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type: object
+ topic:
+ default: ''
+ description: kafka topic to use. It must exist, NetObserv will not create it.
+ type: string
+ required:
+ - address
+ - topic
+ type: object
+ type:
+ description: '`type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`.'
+ enum:
+ - KAFKA
+ - IPFIX
+ type: string
+ required:
+ - type
+ type: object
+ type: array
+ kafka:
+ description: kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the "spec.deploymentModel" is "KAFKA".
+ properties:
+ address:
+ default: ''
+ description: address of the Kafka server
+ type: string
+ sasl:
+ description: SASL authentication configuration. [Unsupported (*)].
+ properties:
+ clientIDReference:
+ description: Reference to the secret or config map containing the client ID
+ properties:
+ file:
+ description: File name within the config map or secret
+ type: string
+ name:
+ description: Name of the config map or secret containing the file
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the file reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ clientSecretReference:
+ description: Reference to the secret or config map containing the client secret
+ properties:
+ file:
+ description: File name within the config map or secret
+ type: string
+ name:
+ description: Name of the config map or secret containing the file
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the file reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type:
+ default: DISABLED
+ description: Type of SASL authentication to use, or `DISABLED` if SASL is not used
+ enum:
+ - DISABLED
+ - PLAIN
+ - SCRAM-SHA512
+ type: string
+ type: object
+ tls:
+ description: tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged).
+ properties:
+ caCert:
+ description: caCert defines the reference of the certificate for the Certificate Authority
+ properties:
+ certFile:
+ description: certFile defines the path to the certificate file name within the config map or secret
+ type: string
+ certKey:
+ description: certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+ type: string
+ name:
+ description: name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'type for the certificate reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ enable:
+ default: false
+ description: enable TLS
+ type: boolean
+ insecureSkipVerify:
+ default: false
+                         description: insecureSkipVerify allows skipping client-side verification of the server certificate. If set to true, the CACert field will be ignored
+ type: boolean
+ userCert:
+ description: userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS)
+ properties:
+ certFile:
+ description: certFile defines the path to the certificate file name within the config map or secret
+ type: string
+ certKey:
+ description: certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+ type: string
+ name:
+ description: name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'type for the certificate reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type: object
+ topic:
+ default: ''
+ description: kafka topic to use. It must exist, NetObserv will not create it.
+ type: string
+ required:
+ - address
+ - topic
+ type: object
+ loki:
+ description: loki, the flow store, client settings.
+ properties:
+ authToken:
+ default: DISABLED
+                         description: AuthToken describes the way to get a token to authenticate to Loki. DISABLED will not send any token with the request. HOST will use the local pod service account to authenticate to Loki. FORWARD will forward the user token; in this mode, pods that are not receiving user requests, such as the processor, will use the local pod service account, similar to HOST mode. When using the Loki Operator, set it to `HOST` or `FORWARD`.
+ enum:
+ - DISABLED
+ - HOST
+ - FORWARD
+ type: string
+ batchSize:
+ default: 102400
+ description: batchSize is max batch size (in bytes) of logs to accumulate before sending.
+ format: int64
+ minimum: 1.0
+ type: integer
+ batchWait:
+ default: 1s
+ description: batchWait is max time to wait before sending a batch.
+ type: string
+ maxBackoff:
+ default: 5s
+ description: maxBackoff is the maximum backoff time for client connection between retries.
+ type: string
+ maxRetries:
+ default: 2
+ description: maxRetries is the maximum number of retries for client connections.
+ format: int32
+ minimum: 0.0
+ type: integer
+ minBackoff:
+ default: 1s
+ description: minBackoff is the initial backoff time for client connection between retries.
+ type: string
+ querierUrl:
+ description: querierURL specifies the address of the Loki querier service, in case it is different from the Loki ingester URL. If empty, the URL value will be used (assuming that the Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway.
+ type: string
+ staticLabels:
+ additionalProperties:
+ type: string
+ default:
+ app: netobserv-flowcollector
+ description: staticLabels is a map of common labels to set on each flow.
+ type: object
+ statusUrl:
+ description: statusURL specifies the address of the Loki /ready /metrics /config endpoints, in case it is different from the Loki querier URL. If empty, the QuerierURL value will be used. This is useful to show error messages and some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/.
+ type: string
+ tenantID:
+ default: netobserv
+ description: tenantID is the Loki X-Scope-OrgID that identifies the tenant for each request. When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode.
+ type: string
+ timeout:
+ default: 10s
+ description: timeout is the maximum time connection / request limit. A Timeout of zero means no timeout.
+ type: string
+ tls:
+ description: tls client configuration.
+ properties:
+ caCert:
+ description: caCert defines the reference of the certificate for the Certificate Authority
+ properties:
+ certFile:
+ description: certFile defines the path to the certificate file name within the config map or secret
+ type: string
+ certKey:
+ description: certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+ type: string
+ name:
+ description: name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'type for the certificate reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ enable:
+ default: false
+ description: enable TLS
+ type: boolean
+ insecureSkipVerify:
+ default: false
+                         description: insecureSkipVerify allows skipping client-side verification of the server certificate. If set to true, the CACert field will be ignored
+ type: boolean
+ userCert:
+ description: userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS)
+ properties:
+ certFile:
+ description: certFile defines the path to the certificate file name within the config map or secret
+ type: string
+ certKey:
+ description: certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+ type: string
+ name:
+ description: name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'type for the certificate reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type: object
+ url:
+ default: http://loki:3100/
+ description: url is the address of an existing Loki service to push the flows to. When using the Loki Operator, set it to the Loki gateway service with the `network` tenant set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.
+ type: string
+ type: object
+ namespace:
+ description: namespace where NetObserv pods are deployed. If empty, the namespace of the operator is going to be used.
+ type: string
+ processor:
+ description: processor defines the settings of the component that receives the flows from the agent, enriches them, and forwards them to the Loki persistence layer.
+ properties:
+ debug:
+ description: Debug allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk.
+ properties:
+ env:
+ additionalProperties:
+ type: string
+ description: env allows passing custom environment variables to the NetObserv Agent. Useful for passing some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug and support scenarios.
+ type: object
+ type: object
+ dropUnusedFields:
+ default: true
+ description: dropUnusedFields allows, when set to true, to drop fields that are known to be unused by OVS, in order to save storage space.
+ type: boolean
+ enableKubeProbes:
+ default: true
+ description: enableKubeProbes is a flag to enable or disable Kubernetes liveness and readiness probes
+ type: boolean
+ healthPort:
+ default: 8080
+ description: healthPort is a collector HTTP port in the Pod that exposes the health check API
+ format: int32
+ maximum: 65535.0
+ minimum: 1.0
+ type: integer
+ imagePullPolicy:
+ default: IfNotPresent
+ description: imagePullPolicy is the Kubernetes pull policy for the image defined above
+ enum:
+ - IfNotPresent
+ - Always
+ - Never
+ type: string
+ kafkaConsumerAutoscaler:
+ description: kafkaConsumerAutoscaler spec of a horizontal pod autoscaler to set up for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled.
+ properties:
+ maxReplicas:
+ default: 3
+ description: maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+ format: int32
+ type: integer
+ metrics:
+ description: metrics used by the pod autoscaler
+ items:
+ description: MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).
+ properties:
+ containerResource:
+ description: containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
+ properties:
+ container:
+ description: container is the name of the container in the pods of the scaling target
+ type: string
+ name:
+ description: name is the name of the resource in question.
+ type: string
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - container
+ - name
+ - target
+ type: object
+ external:
+ description: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
+ properties:
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - metric
+ - target
+ type: object
+ object:
+ description: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
+ properties:
+ describedObject:
+                             description: describedObject specifies the description of an object, such as kind, name, apiVersion
+ properties:
+ apiVersion:
+ description: apiVersion is the API version of the referent
+ type: string
+ kind:
+ description: 'kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - describedObject
+ - metric
+ - target
+ type: object
+ pods:
+ description: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
+ properties:
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - metric
+ - target
+ type: object
+ resource:
+ description: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+ properties:
+ name:
+ description: name is the name of the resource in question.
+ type: string
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - name
+ - target
+ type: object
+ type:
+ description: 'type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled'
+ type: string
+ required:
+ - type
+ type: object
+ type: array
+ minReplicas:
+ description: minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.
+ format: int32
+ type: integer
+ status:
+ default: DISABLED
+ description: Status describe the desired status regarding deploying an horizontal pod autoscaler DISABLED will not deploy an horizontal pod autoscaler ENABLED will deploy an horizontal pod autoscaler
+ enum:
+ - DISABLED
+ - ENABLED
+ type: string
+ type: object
+ kafkaConsumerBatchSize:
+ default: 10485760
+ description: 'kafkaConsumerBatchSize indicates to the broker the maximum batch size, in bytes, that the consumer will accept. Ignored when not using Kafka. Default: 10MB.'
+ type: integer
+ kafkaConsumerQueueCapacity:
+ default: 1000
+ description: kafkaConsumerQueueCapacity defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka.
+ type: integer
+ kafkaConsumerReplicas:
+ default: 3
+ description: kafkaConsumerReplicas defines the number of replicas (pods) to start for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled.
+ format: int32
+ minimum: 0.0
+ type: integer
+ logLevel:
+ default: info
+ description: logLevel of the collector runtime
+ enum:
+ - trace
+ - debug
+ - info
+ - warn
+ - error
+ - fatal
+ - panic
+ type: string
+ metrics:
+ description: Metrics define the processor configuration regarding metrics
+ properties:
+ ignoreTags:
+ default:
+ - egress
+ - packets
+ description: 'ignoreTags is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: egress, ingress, flows, bytes, packets, namespaces, nodes, workloads'
+ items:
+ type: string
+ type: array
+ server:
+ description: metricsServer endpoint configuration for Prometheus scraper
+ properties:
+ port:
+ default: 9102
+ description: the prometheus HTTP port
+ format: int32
+ maximum: 65535.0
+ minimum: 1.0
+ type: integer
+ tls:
+ description: TLS configuration.
+ properties:
+ provided:
+ description: TLS configuration.
+ properties:
+ certFile:
+ description: certFile defines the path to the certificate file name within the config map or secret
+ type: string
+ certKey:
+ description: certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+ type: string
+ name:
+ description: name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'type for the certificate reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type:
+ default: DISABLED
+ description: Select the type of TLS configuration "DISABLED" (default) to not configure TLS for the endpoint, "PROVIDED" to manually provide cert file and a key file, and "AUTO" to use OpenShift auto generated certificate using annotations
+ enum:
+ - DISABLED
+ - PROVIDED
+ - AUTO
+ type: string
+ type: object
+ type: object
+ type: object
+ port:
+ default: 2055
+ description: 'port of the flow collector (host port) By conventions, some value are not authorized port must not be below 1024 and must not equal this values: 4789,6081,500, and 4500'
+ format: int32
+ maximum: 65535.0
+ minimum: 1025.0
+ type: integer
+ profilePort:
+ description: profilePort allows setting up a Go pprof profiler listening to this port
+ format: int32
+ maximum: 65535.0
+ minimum: 0.0
+ type: integer
+ resources:
+ default:
+ limits:
+ memory: 800Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ description: 'resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ properties:
+ claims:
+ description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ type: object
+ required:
+ - agent
+ - deploymentModel
+ type: object
+ status:
+ description: FlowCollectorStatus defines the observed state of FlowCollector
+ properties:
+ conditions:
+ description: conditions represent the latest available observations of an object's state
+ items:
+ description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n \ttype FooStatus struct{ \t // Represents the observations of a foo's current state. \t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" \t // +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map \t // +listMapKey=type \t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields \t}"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating details about the transition. This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
+ format: int64
+ minimum: 0.0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - 'True'
+ - 'False'
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ namespace:
+ description: namespace where console plugin and flowlogs-pipeline have been deployed.
+ type: string
+ required:
+ - conditions
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ''
+ plural: ''
+ conditions: []
+ storedVersions: []
diff --git a/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta1/flowcollectors.yaml b/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta1/flowcollectors.yaml
new file mode 100644
index 000000000..018a700bc
--- /dev/null
+++ b/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta1/flowcollectors.yaml
@@ -0,0 +1,1840 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.6.1
+ name: flowcollectors.flows.netobserv.io
+spec:
+ group: flows.netobserv.io
+ names:
+ kind: FlowCollector
+ listKind: FlowCollectorList
+ plural: flowcollectors
+ singular: flowcollector
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.agent.type
+ name: Agent
+ type: string
+ - jsonPath: .spec.agent.ebpf.sampling
+ name: Sampling (EBPF)
+ type: string
+ - jsonPath: .spec.deploymentModel
+ name: Deployment Model
+ type: string
+ - jsonPath: .status.conditions[*].reason
+ name: Status
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: '`FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments.'
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: 'Defines the desired state of the FlowCollector resource.
+ *: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not officially supported by Red Hat. It might have been, for instance, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers might provide some support for these features as a best effort only.'
+ properties:
+ agent:
+ description: Agent configuration for flows extraction.
+ properties:
+ ebpf:
+ description: '`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`.'
+ properties:
+ cacheActiveTimeout:
+ default: 5s
+ description: '`cacheActiveTimeout` is the max period during which the reporter aggregates flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.'
+ pattern: ^\d+(ns|ms|s|m)?$
+ type: string
+ cacheMaxFlows:
+ default: 100000
+ description: '`cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.'
+ format: int32
+ minimum: 1.0
+ type: integer
+ debug:
+ description: '`debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk.'
+ properties:
+ env:
+ additionalProperties:
+ type: string
+ description: '`env` allows passing custom environment variables to underlying components. Useful for passing some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios.'
+ type: object
+ type: object
+ excludeInterfaces:
+ default:
+ - lo
+ description: '`excludeInterfaces` contains the interface names that are excluded from flow tracing. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.'
+ items:
+ type: string
+ type: array
+ features:
+ description: 'List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
+ - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
+ - `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
+ - `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with `sampling` set to 1.
+ '
+ items:
+ description: Agent feature, can be one of:
+ - `PacketDrop`, to track packet drops.
+ - `DNSTracking`, to track specific information on DNS traffic.
+ - `FlowRTT`, to track TCP latency. [Unsupported (*)].
+ enum:
+ - PacketDrop
+ - DNSTracking
+ - FlowRTT
+ type: string
+ type: array
+ imagePullPolicy:
+ default: IfNotPresent
+ description: '`imagePullPolicy` is the Kubernetes pull policy for the image defined above'
+ enum:
+ - IfNotPresent
+ - Always
+ - Never
+ type: string
+ interfaces:
+ description: '`interfaces` contains the interface names from where flows are collected. If empty, the agent fetches all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.'
+ items:
+ type: string
+ type: array
+ kafkaBatchSize:
+ default: 10485760
+ description: '`kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB.'
+ type: integer
+ logLevel:
+ default: info
+ description: '`logLevel` defines the log level for the NetObserv eBPF Agent'
+ enum:
+ - trace
+ - debug
+ - info
+ - warn
+ - error
+ - fatal
+ - panic
+ type: string
+ privileged:
+ description: 'Privileged mode for the eBPF Agent container. In general this setting can be ignored or set to `false`: in that case, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable its correct operation. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges.'
+ type: boolean
+ resources:
+ default:
+ limits:
+ memory: 800Mi
+ requests:
+ cpu: 100m
+ memory: 50Mi
+ description: '`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ properties:
+ claims:
+ description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ sampling:
+ default: 50
+ description: Sampling rate of the flow reporter. 100 means one flow on 100 is sent. 0 or 1 means all flows are sampled.
+ format: int32
+ minimum: 0.0
+ type: integer
+ type: object
+ ipfix:
+ description: '`ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.'
+ properties:
+ cacheActiveTimeout:
+ default: 20s
+ description: '`cacheActiveTimeout` is the max period during which the reporter aggregates flows before sending.'
+ pattern: ^\d+(ns|ms|s|m)?$
+ type: string
+ cacheMaxFlows:
+ default: 400
+ description: '`cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows.'
+ format: int32
+ minimum: 0.0
+ type: integer
+ clusterNetworkOperator:
+ description: '`clusterNetworkOperator` defines the settings related to the OpenShift Cluster Network Operator, when available.'
+ properties:
+ namespace:
+ default: openshift-network-operator
+ description: Namespace where the config map is going to be deployed.
+ type: string
+ type: object
+ forceSampleAll:
+ default: false
+ description: '`forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.'
+ type: boolean
+ ovnKubernetes:
+ description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN''s IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.'
+ properties:
+ containerName:
+ default: ovnkube-node
+ description: '`containerName` defines the name of the container to configure for IPFIX.'
+ type: string
+ daemonSetName:
+ default: ovnkube-node
+ description: '`daemonSetName` defines the name of the DaemonSet controlling the OVN-Kubernetes pods.'
+ type: string
+ namespace:
+ default: ovn-kubernetes
+ description: Namespace where OVN-Kubernetes pods are deployed.
+ type: string
+ type: object
+ sampling:
+ default: 400
+ description: '`sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX.'
+ format: int32
+ minimum: 2.0
+ type: integer
+ type: object
+ type:
+ default: EBPF
+ description: '`type` selects the flows tracing agent. Possible values are:
+ - `EBPF` (default) to use NetObserv eBPF agent.
+ - `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
+ `EBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).'
+ enum:
+ - EBPF
+ - IPFIX
+ type: string
+ type: object
+ consolePlugin:
+ description: '`consolePlugin` defines the settings related to the OpenShift Console plugin, when available.'
+ properties:
+ autoscaler:
+ description: '`autoscaler` spec of a horizontal pod autoscaler to set up for the plugin Deployment.'
+ properties:
+ maxReplicas:
+ default: 3
+ description: '`maxReplicas` is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.'
+ format: int32
+ type: integer
+ metrics:
+ description: Metrics used by the pod autoscaler
+ items:
+ description: MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).
+ properties:
+ containerResource:
+ description: containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
+ properties:
+ container:
+ description: container is the name of the container in the pods of the scaling target
+ type: string
+ name:
+ description: name is the name of the resource in question.
+ type: string
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - container
+ - name
+ - target
+ type: object
+ external:
+ description: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
+ properties:
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - metric
+ - target
+ type: object
+ object:
+ description: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
+ properties:
+ describedObject:
+ description: describedObject specifies the descriptions of a object,such as kind,name apiVersion
+ properties:
+ apiVersion:
+ description: apiVersion is the API version of the referent
+ type: string
+ kind:
+ description: 'kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - describedObject
+ - metric
+ - target
+ type: object
+ pods:
+ description: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
+ properties:
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - metric
+ - target
+ type: object
+ resource:
+ description: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+ properties:
+ name:
+ description: name is the name of the resource in question.
+ type: string
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - name
+ - target
+ type: object
+ type:
+ description: 'type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled'
+ type: string
+ required:
+ - type
+ type: object
+ type: array
+ minReplicas:
+ description: '`minReplicas` is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.'
+ format: int32
+ type: integer
+ status:
+ default: DISABLED
+ description: '`status` describes the desired status regarding deploying an horizontal pod autoscaler.
+ - `DISABLED` does not deploy an horizontal pod autoscaler.
+ - `ENABLED` deploys an horizontal pod autoscaler.
+ '
+ enum:
+ - DISABLED
+ - ENABLED
+ type: string
+ type: object
+ enable:
+ default: true
+ description: Enables the console plugin deployment. `spec.Loki.enable` must also be `true`
+ type: boolean
+ imagePullPolicy:
+ default: IfNotPresent
+ description: '`imagePullPolicy` is the Kubernetes pull policy for the image defined above'
+ enum:
+ - IfNotPresent
+ - Always
+ - Never
+ type: string
+ logLevel:
+ default: info
+ description: '`logLevel` for the console plugin backend'
+ enum:
+ - trace
+ - debug
+ - info
+ - warn
+ - error
+ - fatal
+ - panic
+ type: string
+ port:
+ default: 9001
+ description: '`port` is the plugin service port. Do not use 9002, which is reserved for metrics.'
+ format: int32
+ maximum: 65535.0
+ minimum: 1.0
+ type: integer
+ portNaming:
+ default:
+ enable: true
+ description: '`portNaming` defines the configuration of the port-to-service name translation'
+ properties:
+ enable:
+ default: true
+ description: Enable the console plugin port-to-service name translation
+ type: boolean
+ portNames:
+ additionalProperties:
+ type: string
+ description: '`portNames` defines additional port names to use in the console, for example, `portNames: {"3100": "loki"}`.'
+ type: object
+ type: object
+ quickFilters:
+ default:
+ - default: true
+ filter:
+ dst_namespace!: openshift-,netobserv
+ src_namespace!: openshift-,netobserv
+ name: Applications
+ - filter:
+ dst_namespace: openshift-,netobserv
+ src_namespace: openshift-,netobserv
+ name: Infrastructure
+ - default: true
+ filter:
+ dst_kind: Pod
+ src_kind: Pod
+ name: Pods network
+ - filter:
+ dst_kind: Service
+ name: Services network
+ description: '`quickFilters` configures quick filter presets for the Console plugin'
+ items:
+ description: '`QuickFilter` defines preset configuration for Console''s quick filters'
+ properties:
+ default:
+ description: '`default` defines whether this filter should be active by default or not'
+ type: boolean
+ filter:
+ additionalProperties:
+ type: string
+ description: '`filter` is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a coma-separated string, for example, `filter: {"src_namespace": "namespace1,namespace2"}`.'
+ type: object
+ name:
+ description: Name of the filter, that is displayed in the Console
+ type: string
+ required:
+ - filter
+ - name
+ type: object
+ type: array
+ register:
+ default: true
+ description: '`register` allows, when set to `true`, to automatically register the provided console plugin with the OpenShift Console operator. When set to `false`, you can still register it manually by editing console.operator.openshift.io/cluster with the following command: `oc patch console.operator.openshift.io cluster --type=''json'' -p ''[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''`'
+ type: boolean
+ replicas:
+ default: 1
+ description: '`replicas` defines the number of replicas (pods) to start.'
+ format: int32
+ minimum: 0.0
+ type: integer
+ resources:
+ default:
+ limits:
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ memory: 50Mi
+ description: '`resources`, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ properties:
+ claims:
+ description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ type: object
+ deploymentModel:
+ default: DIRECT
+ description: '`deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
+ - `DIRECT` (default) to make the flow processor listening directly from the agents.
+ - `KAFKA` to make flows sent to a Kafka pipeline before consumption by the processor.
+ Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).'
+ enum:
+ - DIRECT
+ - KAFKA
+ type: string
+ exporters:
+ description: '`exporters` define additional optional exporters for custom consumption or storage.'
+ items:
+ description: '`FlowCollectorExporter` defines an additional exporter to send enriched flows to.'
+ properties:
+ ipfix:
+ description: IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to.
+ properties:
+ targetHost:
+ default: ''
+ description: Address of the IPFIX external receiver
+ type: string
+ targetPort:
+ description: Port for the IPFIX external receiver
+ type: integer
+ transport:
+ description: Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`.
+ enum:
+ - TCP
+ - UDP
+ type: string
+ required:
+ - targetHost
+ - targetPort
+ type: object
+ kafka:
+ description: Kafka configuration, such as the address and topic, to send enriched flows to.
+ properties:
+ address:
+ default: ''
+ description: Address of the Kafka server
+ type: string
+ sasl:
+ description: SASL authentication configuration. [Unsupported (*)].
+ properties:
+ clientIDReference:
+ description: Reference to the secret or config map containing the client ID
+ properties:
+ file:
+ description: File name within the config map or secret
+ type: string
+ name:
+ description: Name of the config map or secret containing the file
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the file reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ clientSecretReference:
+ description: Reference to the secret or config map containing the client secret
+ properties:
+ file:
+ description: File name within the config map or secret
+ type: string
+ name:
+ description: Name of the config map or secret containing the file
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the file reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type:
+ default: DISABLED
+ description: Type of SASL authentication to use, or `DISABLED` if SASL is not used
+ enum:
+ - DISABLED
+ - PLAIN
+ - SCRAM-SHA512
+ type: string
+ type: object
+ tls:
+ description: TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093.
+ properties:
+ caCert:
+ description: '`caCert` defines the reference of the certificate for the Certificate Authority'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ enable:
+ default: false
+ description: Enable TLS
+ type: boolean
+ insecureSkipVerify:
+ default: false
+ description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.'
+ type: boolean
+ userCert:
+ description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type: object
+ topic:
+ default: ''
+ description: Kafka topic to use. It must exist. NetObserv does not create it.
+ type: string
+ required:
+ - address
+ - topic
+ type: object
+ type:
+ description: '`type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`.'
+ enum:
+ - KAFKA
+ - IPFIX
+ type: string
+ required:
+ - type
+ type: object
+ type: array
+ kafka:
+ description: Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `KAFKA`.
+ properties:
+ address:
+ default: ''
+ description: Address of the Kafka server
+ type: string
+ sasl:
+ description: SASL authentication configuration. [Unsupported (*)].
+ properties:
+ clientIDReference:
+ description: Reference to the secret or config map containing the client ID
+ properties:
+ file:
+ description: File name within the config map or secret
+ type: string
+ name:
+ description: Name of the config map or secret containing the file
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the file reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ clientSecretReference:
+ description: Reference to the secret or config map containing the client secret
+ properties:
+ file:
+ description: File name within the config map or secret
+ type: string
+ name:
+ description: Name of the config map or secret containing the file
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the file reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type:
+ default: DISABLED
+ description: Type of SASL authentication to use, or `DISABLED` if SASL is not used
+ enum:
+ - DISABLED
+ - PLAIN
+ - SCRAM-SHA512
+ type: string
+ type: object
+ tls:
+ description: TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093.
+ properties:
+ caCert:
+ description: '`caCert` defines the reference of the certificate for the Certificate Authority'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ enable:
+ default: false
+ description: Enable TLS
+ type: boolean
+ insecureSkipVerify:
+ default: false
+ description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.'
+ type: boolean
+ userCert:
+ description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type: object
+ topic:
+ default: ''
+ description: Kafka topic to use. It must exist. NetObserv does not create it.
+ type: string
+ required:
+ - address
+ - topic
+ type: object
+ loki:
+ description: '`loki`, the flow store, client settings.'
+ properties:
+ authToken:
+ default: DISABLED
+ description: '`authToken` describes the way to get a token to authenticate to Loki.
+ - `DISABLED` does not send any token with the request.
+ - `FORWARD` forwards the user token for authorization.
+ - `HOST` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
+ When using the Loki Operator, this must be set to `FORWARD`.'
+ enum:
+ - DISABLED
+ - HOST
+ - FORWARD
+ type: string
+ batchSize:
+ default: 102400
+ description: '`batchSize` is the maximum batch size (in bytes) of logs to accumulate before sending.'
+ format: int64
+ minimum: 1.0
+ type: integer
+ batchWait:
+ default: 1s
+ description: '`batchWait` is the maximum time to wait before sending a batch.'
+ type: string
+ enable:
+ default: true
+ description: Set `enable` to `true` to store flows in Loki. It is required for the OpenShift Console plugin installation.
+ type: boolean
+ maxBackoff:
+ default: 5s
+ description: '`maxBackoff` is the maximum backoff time for client connection between retries.'
+ type: string
+ maxRetries:
+ default: 2
+ description: '`maxRetries` is the maximum number of retries for client connections.'
+ format: int32
+ minimum: 0.0
+ type: integer
+ minBackoff:
+ default: 1s
+ description: '`minBackoff` is the initial backoff time for client connection between retries.'
+ type: string
+ querierUrl:
+ description: '`querierURL` specifies the address of the Loki querier service, in case it is different from the Loki ingester URL. If empty, the URL value is used (assuming that the Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway.'
+ type: string
+ staticLabels:
+ additionalProperties:
+ type: string
+ default:
+ app: netobserv-flowcollector
+ description: '`staticLabels` is a map of common labels to set on each flow.'
+ type: object
+ statusTls:
+ description: TLS client configuration for Loki status URL.
+ properties:
+ caCert:
+ description: '`caCert` defines the reference of the certificate for the Certificate Authority'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ enable:
+ default: false
+ description: Enable TLS
+ type: boolean
+ insecureSkipVerify:
+ default: false
+ description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.'
+ type: boolean
+ userCert:
+ description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type: object
+ statusUrl:
+ description: '`statusURL` specifies the address of the Loki `/ready`, `/metrics` and `/config` endpoints, in case it is different from the Loki querier URL. If empty, the `querierURL` value is used. This is useful to show error messages and some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. `statusTLS` configuration is used when `statusUrl` is set.'
+ type: string
+ tenantID:
+ default: netobserv
+ description: '`tenantID` is the Loki `X-Scope-OrgID` that identifies the tenant for each request. When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode.'
+ type: string
+ timeout:
+ default: 10s
+ description: '`timeout` is the maximum time connection / request limit. A timeout of zero means no timeout.'
+ type: string
+ tls:
+ description: TLS client configuration for Loki URL.
+ properties:
+ caCert:
+ description: '`caCert` defines the reference of the certificate for the Certificate Authority'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ enable:
+ default: false
+ description: Enable TLS
+ type: boolean
+ insecureSkipVerify:
+ default: false
+ description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.'
+ type: boolean
+ userCert:
+ description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type: object
+ url:
+ default: http://loki:3100/
+ description: '`url` is the address of an existing Loki service to push the flows to. When using the Loki Operator, set it to the Loki gateway service with the `network` tenant set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.'
+ type: string
+ type: object
+ namespace:
+ default: netobserv
+ description: Namespace where NetObserv pods are deployed.
+ type: string
+ processor:
+ description: '`processor` defines the settings of the component that receives the flows from the agent, enriches them, generates metrics, and forwards them to the Loki persistence layer and/or any available exporter.'
+ properties:
+ clusterName:
+ default: ''
+ description: '`clusterName` is the name of the cluster to appear in the flows data. This is useful in a multi-cluster context. When using OpenShift, leave empty to make it automatically determined.'
+ type: string
+ conversationEndTimeout:
+ default: 10s
+ description: '`conversationEndTimeout` is the time to wait after a network flow is received, to consider the conversation ended. This delay is ignored when a FIN packet is collected for TCP flows (see `conversationTerminatingTimeout` instead).'
+ type: string
+ conversationHeartbeatInterval:
+ default: 30s
+ description: '`conversationHeartbeatInterval` is the time to wait between "tick" events of a conversation'
+ type: string
+ conversationTerminatingTimeout:
+ default: 5s
+ description: '`conversationTerminatingTimeout` is the time to wait from detected FIN flag to end a conversation. Only relevant for TCP flows.'
+ type: string
+ debug:
+ description: '`debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk.'
+ properties:
+ env:
+ additionalProperties:
+ type: string
+ description: '`env` allows passing custom environment variables to underlying components. Useful for passing some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios.'
+ type: object
+ type: object
+ dropUnusedFields:
+ default: true
+ description: '`dropUnusedFields` allows, when set to `true`, to drop fields that are known to be unused by OVS, to save storage space.'
+ type: boolean
+ enableKubeProbes:
+ default: true
+ description: '`enableKubeProbes` is a flag to enable or disable Kubernetes liveness and readiness probes'
+ type: boolean
+ healthPort:
+ default: 8080
+ description: '`healthPort` is a collector HTTP port in the Pod that exposes the health check API'
+ format: int32
+ maximum: 65535.0
+ minimum: 1.0
+ type: integer
+ imagePullPolicy:
+ default: IfNotPresent
+ description: '`imagePullPolicy` is the Kubernetes pull policy for the image defined above'
+ enum:
+ - IfNotPresent
+ - Always
+ - Never
+ type: string
+ kafkaConsumerAutoscaler:
+ description: '`kafkaConsumerAutoscaler` is the spec of a horizontal pod autoscaler to set up for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled.'
+ properties:
+ maxReplicas:
+ default: 3
+ description: '`maxReplicas` is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.'
+ format: int32
+ type: integer
+ metrics:
+ description: Metrics used by the pod autoscaler
+ items:
+ description: MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).
+ properties:
+ containerResource:
+ description: containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
+ properties:
+ container:
+ description: container is the name of the container in the pods of the scaling target
+ type: string
+ name:
+ description: name is the name of the resource in question.
+ type: string
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - container
+ - name
+ - target
+ type: object
+ external:
+ description: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
+ properties:
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - metric
+ - target
+ type: object
+ object:
+ description: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
+ properties:
+ describedObject:
+ description: describedObject specifies the descriptions of a object,such as kind,name apiVersion
+ properties:
+ apiVersion:
+ description: apiVersion is the API version of the referent
+ type: string
+ kind:
+ description: 'kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - describedObject
+ - metric
+ - target
+ type: object
+ pods:
+ description: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
+ properties:
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - metric
+ - target
+ type: object
+ resource:
+ description: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+ properties:
+ name:
+ description: name is the name of the resource in question.
+ type: string
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - name
+ - target
+ type: object
+ type:
+ description: 'type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled'
+ type: string
+ required:
+ - type
+ type: object
+ type: array
+ minReplicas:
+ description: '`minReplicas` is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.'
+ format: int32
+ type: integer
+ status:
+ default: DISABLED
+ description: '`status` describes the desired status regarding deploying an horizontal pod autoscaler.
+ - `DISABLED` does not deploy an horizontal pod autoscaler.
+ - `ENABLED` deploys an horizontal pod autoscaler.
+ '
+ enum:
+ - DISABLED
+ - ENABLED
+ type: string
+ type: object
+ kafkaConsumerBatchSize:
+ default: 10485760
+ description: '`kafkaConsumerBatchSize` indicates to the broker the maximum batch size, in bytes, that the consumer accepts. Ignored when not using Kafka. Default: 10MB.'
+ type: integer
+ kafkaConsumerQueueCapacity:
+ default: 1000
+ description: '`kafkaConsumerQueueCapacity` defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka.'
+ type: integer
+ kafkaConsumerReplicas:
+ default: 3
+ description: '`kafkaConsumerReplicas` defines the number of replicas (pods) to start for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled.'
+ format: int32
+ minimum: 0.0
+ type: integer
+ logLevel:
+ default: info
+ description: '`logLevel` of the processor runtime'
+ enum:
+ - trace
+ - debug
+ - info
+ - warn
+ - error
+ - fatal
+ - panic
+ type: string
+ logTypes:
+ default: FLOWS
+ description: '`logTypes` defines the desired record types to generate. Possible values are:
+ - `FLOWS` (default) to export regular network flows
+ - `CONVERSATIONS` to generate events for started conversations, ended conversations as well as periodic "tick" updates
+ - `ENDED_CONVERSATIONS` to generate only ended conversations events
+ - `ALL` to generate both network flows and all conversations events
+ '
+ enum:
+ - FLOWS
+ - CONVERSATIONS
+ - ENDED_CONVERSATIONS
+ - ALL
+ type: string
+ metrics:
+ description: '`Metrics` define the processor configuration regarding metrics'
+ properties:
+ disableAlerts:
+ description: '`disableAlerts` is a list of alerts that should be disabled. Possible values are:
+ `NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.
+ `NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors.
+ '
+ items:
+ description: Name of a processor alert. Possible values are:
+ - `NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.
+ - `NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors.
+ enum:
+ - NetObservNoFlows
+ - NetObservLokiError
+ type: string
+ type: array
+ ignoreTags:
+ default:
+ - egress
+ - packets
+ - nodes-flows
+ - namespaces-flows
+ - workloads-flows
+ - namespaces
+ description: '`ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` offering a finer granularity).'
+ items:
+ type: string
+ type: array
+ server:
+ description: Metrics server endpoint configuration for Prometheus scraper
+ properties:
+ port:
+ default: 9102
+ description: The prometheus HTTP port
+ format: int32
+ maximum: 65535.0
+ minimum: 1.0
+ type: integer
+ tls:
+ description: TLS configuration.
+ properties:
+ insecureSkipVerify:
+ default: false
+ description: '`insecureSkipVerify` allows skipping client-side verification of the provided certificate. If set to `true`, the `providedCaFile` field is ignored.'
+ type: boolean
+ provided:
+ description: TLS configuration when `type` is set to `PROVIDED`.
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ providedCaFile:
+ description: Reference to the CA file when `type` is set to `PROVIDED`.
+ properties:
+ file:
+ description: File name within the config map or secret
+ type: string
+ name:
+ description: Name of the config map or secret containing the file
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the file reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type:
+ default: DISABLED
+ description: Select the type of TLS configuration:
+ - `DISABLED` (default) to not configure TLS for the endpoint. - `PROVIDED` to manually provide cert file and a key file. - `AUTO` to use OpenShift auto generated certificate using annotations.
+ enum:
+ - DISABLED
+ - PROVIDED
+ - AUTO
+ type: string
+ type: object
+ type: object
+ type: object
+ port:
+ default: 2055
+ description: Port of the flow collector (host port). By convention, some values are forbidden. It must be greater than 1024 and different from 4500, 4789 and 6081.
+ format: int32
+ maximum: 65535.0
+ minimum: 1025.0
+ type: integer
+ profilePort:
+ description: '`profilePort` allows setting up a Go pprof profiler listening to this port'
+ format: int32
+ maximum: 65535.0
+ minimum: 0.0
+ type: integer
+ resources:
+ default:
+ limits:
+ memory: 800Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ description: '`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ properties:
+ claims:
+ description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ type: object
+ type: object
+ status:
+ description: '`FlowCollectorStatus` defines the observed state of FlowCollector'
+ properties:
+ conditions:
+ description: '`conditions` represent the latest available observations of an object''s state'
+ items:
+ description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n \ttype FooStatus struct{ \t // Represents the observations of a foo's current state. \t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" \t // +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map \t // +listMapKey=type \t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields \t}"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating details about the transition. This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
+ format: int64
+ minimum: 0.0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - 'True'
+ - 'False'
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ namespace:
+ description: Namespace where console plugin and flowlogs-pipeline have been deployed.
+ type: string
+ required:
+ - conditions
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ''
+ plural: ''
+ conditions: []
+ storedVersions: []
diff --git a/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta2/flowcollectors.yaml b/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta2/flowcollectors.yaml
new file mode 100644
index 000000000..7b4c2518b
--- /dev/null
+++ b/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta2/flowcollectors.yaml
@@ -0,0 +1,2008 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.6.1
+ name: flowcollectors.flows.netobserv.io
+spec:
+ group: flows.netobserv.io
+ names:
+ kind: FlowCollector
+ listKind: FlowCollectorList
+ plural: flowcollectors
+ singular: flowcollector
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .spec.agent.type
+ name: Agent
+ type: string
+ - jsonPath: .spec.agent.ebpf.sampling
+ name: Sampling (EBPF)
+ type: string
+ - jsonPath: .spec.deploymentModel
+ name: Deployment Model
+ type: string
+ - jsonPath: .status.conditions[*].reason
+ name: Status
+ type: string
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: '`FlowCollector` is the schema for the network flows collection API, which pilots and configures the underlying deployments.'
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: 'Defines the desired state of the FlowCollector resource.
*: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not officially supported by Red Hat. It might have been, for instance, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers might provide some support for these features as a best effort only.'
+ properties:
+ agent:
+ description: Agent configuration for flows extraction.
+ properties:
+ ebpf:
+ description: '`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`.'
+ properties:
+ cacheActiveTimeout:
+ default: 5s
+ description: '`cacheActiveTimeout` is the max period during which the reporter aggregates flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.'
+ pattern: ^\d+(ns|ms|s|m)?$
+ type: string
+ cacheMaxFlows:
+ default: 100000
+ description: '`cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.'
+ format: int32
+ minimum: 1.0
+ type: integer
+ debug:
+ description: '`debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk.'
+ properties:
+ env:
+ additionalProperties:
+ type: string
+ description: '`env` allows passing custom environment variables to underlying components. Useful for passing some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios.'
+ type: object
+ type: object
+ excludeInterfaces:
+ default:
+ - lo
+ description: '`excludeInterfaces` contains the interface names that are excluded from flow tracing. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.'
+ items:
+ type: string
+ type: array
+ features:
+ description: 'List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with `sampling` set to 1.
'
+ items:
+ description: Agent feature, can be one of:
- `PacketDrop`, to track packet drops.
- `DNSTracking`, to track specific information on DNS traffic.
- `FlowRTT`, to track TCP latency. [Unsupported (*)].
+ enum:
+ - PacketDrop
+ - DNSTracking
+ - FlowRTT
+ type: string
+ type: array
+ imagePullPolicy:
+ default: IfNotPresent
+ description: '`imagePullPolicy` is the Kubernetes pull policy for the image defined above'
+ enum:
+ - IfNotPresent
+ - Always
+ - Never
+ type: string
+ interfaces:
+ description: '`interfaces` contains the interface names from where flows are collected. If empty, the agent fetches all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.'
+ items:
+ type: string
+ type: array
+ kafkaBatchSize:
+ default: 10485760
+ description: '`kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB.'
+ type: integer
+ logLevel:
+ default: info
+ description: '`logLevel` defines the log level for the NetObserv eBPF Agent'
+ enum:
+ - trace
+ - debug
+ - info
+ - warn
+ - error
+ - fatal
+ - panic
+ type: string
+ privileged:
+ description: 'Privileged mode for the eBPF Agent container. In general this setting can be ignored or set to `false`: in that case, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable its correct operation. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges.'
+ type: boolean
+ resources:
+ default:
+ limits:
+ memory: 800Mi
+ requests:
+ cpu: 100m
+ memory: 50Mi
+ description: '`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ properties:
+ claims:
+ description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ sampling:
+ default: 50
+ description: Sampling rate of the flow reporter. 100 means one flow on 100 is sent. 0 or 1 means all flows are sampled.
+ format: int32
+ minimum: 0.0
+ type: integer
+ type: object
+ ipfix:
+ description: '`ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.'
+ properties:
+ cacheActiveTimeout:
+ default: 20s
+ description: '`cacheActiveTimeout` is the max period during which the reporter aggregates flows before sending.'
+ pattern: ^\d+(ns|ms|s|m)?$
+ type: string
+ cacheMaxFlows:
+ default: 400
+ description: '`cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows.'
+ format: int32
+ minimum: 0.0
+ type: integer
+ clusterNetworkOperator:
+ description: '`clusterNetworkOperator` defines the settings related to the OpenShift Cluster Network Operator, when available.'
+ properties:
+ namespace:
+ default: openshift-network-operator
+ description: Namespace where the config map is going to be deployed.
+ type: string
+ type: object
+ forceSampleAll:
+ default: false
+ description: '`forceSampleAll` allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to `true`. Use at your own risk. When it is set to `true`, the value of `sampling` is ignored.'
+ type: boolean
+ ovnKubernetes:
+ description: '`ovnKubernetes` defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN''s IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.'
+ properties:
+ containerName:
+ default: ovnkube-node
+ description: '`containerName` defines the name of the container to configure for IPFIX.'
+ type: string
+ daemonSetName:
+ default: ovnkube-node
+ description: '`daemonSetName` defines the name of the DaemonSet controlling the OVN-Kubernetes pods.'
+ type: string
+ namespace:
+ default: ovn-kubernetes
+ description: Namespace where OVN-Kubernetes pods are deployed.
+ type: string
+ type: object
+ sampling:
+ default: 400
+ description: '`sampling` is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to `forceSampleAll`. Alternatively, you can use the eBPF Agent instead of IPFIX.'
+ format: int32
+ minimum: 2.0
+ type: integer
+ type: object
+ type:
+ default: EBPF
+ description: '`type` selects the flows tracing agent. Possible values are:
- `EBPF` (default) to use NetObserv eBPF agent.
- `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
`EBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).'
+ enum:
+ - EBPF
+ - IPFIX
+ type: string
+ type: object
+ consolePlugin:
+ description: '`consolePlugin` defines the settings related to the OpenShift Console plugin, when available.'
+ properties:
+ autoscaler:
+ description: '`autoscaler` spec of a horizontal pod autoscaler to set up for the plugin Deployment.'
+ properties:
+ maxReplicas:
+ default: 3
+ description: '`maxReplicas` is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.'
+ format: int32
+ type: integer
+ metrics:
+ description: Metrics used by the pod autoscaler
+ items:
+ description: MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).
+ properties:
+ containerResource:
+ description: containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
+ properties:
+ container:
+ description: container is the name of the container in the pods of the scaling target
+ type: string
+ name:
+ description: name is the name of the resource in question.
+ type: string
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - container
+ - name
+ - target
+ type: object
+ external:
+ description: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
+ properties:
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - metric
+ - target
+ type: object
+ object:
+ description: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
+ properties:
+ describedObject:
+                          description: describedObject specifies the description of an object, such as kind, name, apiVersion
+ properties:
+ apiVersion:
+ description: apiVersion is the API version of the referent
+ type: string
+ kind:
+ description: 'kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - describedObject
+ - metric
+ - target
+ type: object
+ pods:
+ description: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
+ properties:
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - metric
+ - target
+ type: object
+ resource:
+ description: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+ properties:
+ name:
+ description: name is the name of the resource in question.
+ type: string
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - name
+ - target
+ type: object
+ type:
+                          description: 'type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available only when the feature-gate HPAContainerMetrics is enabled'
+ type: string
+ required:
+ - type
+ type: object
+ type: array
+ minReplicas:
+ description: '`minReplicas` is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.'
+ format: int32
+ type: integer
+ status:
+ default: DISABLED
+                          description: '`status` describes the desired status regarding deploying a horizontal pod autoscaler.
 - `DISABLED` does not deploy a horizontal pod autoscaler.
 - `ENABLED` deploys a horizontal pod autoscaler.
'
+ enum:
+ - DISABLED
+ - ENABLED
+ type: string
+ type: object
+ enable:
+ default: true
+ description: Enables the console plugin deployment. `spec.Loki.enable` must also be `true`
+ type: boolean
+ imagePullPolicy:
+ default: IfNotPresent
+ description: '`imagePullPolicy` is the Kubernetes pull policy for the image defined above'
+ enum:
+ - IfNotPresent
+ - Always
+ - Never
+ type: string
+ logLevel:
+ default: info
+ description: '`logLevel` for the console plugin backend'
+ enum:
+ - trace
+ - debug
+ - info
+ - warn
+ - error
+ - fatal
+ - panic
+ type: string
+ port:
+ default: 9001
+ description: '`port` is the plugin service port. Do not use 9002, which is reserved for metrics.'
+ format: int32
+ maximum: 65535.0
+ minimum: 1.0
+ type: integer
+ portNaming:
+ default:
+ enable: true
+ description: '`portNaming` defines the configuration of the port-to-service name translation'
+ properties:
+ enable:
+ default: true
+ description: Enable the console plugin port-to-service name translation
+ type: boolean
+ portNames:
+ additionalProperties:
+ type: string
+ description: '`portNames` defines additional port names to use in the console, for example, `portNames: {"3100": "loki"}`.'
+ type: object
+ type: object
+ quickFilters:
+ default:
+ - default: true
+ filter:
+ dst_namespace!: openshift-,netobserv
+ src_namespace!: openshift-,netobserv
+ name: Applications
+ - filter:
+ dst_namespace: openshift-,netobserv
+ src_namespace: openshift-,netobserv
+ name: Infrastructure
+ - default: true
+ filter:
+ dst_kind: Pod
+ src_kind: Pod
+ name: Pods network
+ - filter:
+ dst_kind: Service
+ name: Services network
+ description: '`quickFilters` configures quick filter presets for the Console plugin'
+ items:
+ description: '`QuickFilter` defines preset configuration for Console''s quick filters'
+ properties:
+ default:
+ description: '`default` defines whether this filter should be active by default or not'
+ type: boolean
+ filter:
+ additionalProperties:
+ type: string
+ description: '`filter` is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a coma-separated string, for example, `filter: {"src_namespace": "namespace1,namespace2"}`.'
+ type: object
+ name:
+ description: Name of the filter, that is displayed in the Console
+ type: string
+ required:
+ - filter
+ - name
+ type: object
+ type: array
+ register:
+ default: true
+ description: '`register` allows, when set to `true`, to automatically register the provided console plugin with the OpenShift Console operator. When set to `false`, you can still register it manually by editing console.operator.openshift.io/cluster with the following command: `oc patch console.operator.openshift.io cluster --type=''json'' -p ''[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]''`'
+ type: boolean
+ replicas:
+ default: 1
+ description: '`replicas` defines the number of replicas (pods) to start.'
+ format: int32
+ minimum: 0.0
+ type: integer
+ resources:
+ default:
+ limits:
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ memory: 50Mi
+ description: '`resources`, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ properties:
+ claims:
+ description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ type: object
+ deploymentModel:
+ default: DIRECT
+ description: '`deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
- `DIRECT` (default) to make the flow processor listening directly from the agents.
- `KAFKA` to make flows sent to a Kafka pipeline before consumption by the processor.
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).'
+ enum:
+ - DIRECT
+ - KAFKA
+ type: string
+ exporters:
+ description: '`exporters` define additional optional exporters for custom consumption or storage.'
+ items:
+ description: '`FlowCollectorExporter` defines an additional exporter to send enriched flows to.'
+ properties:
+ ipfix:
+ description: IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to.
+ properties:
+ targetHost:
+ default: ''
+ description: Address of the IPFIX external receiver
+ type: string
+ targetPort:
+ description: Port for the IPFIX external receiver
+ type: integer
+ transport:
+ description: Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`.
+ enum:
+ - TCP
+ - UDP
+ type: string
+ required:
+ - targetHost
+ - targetPort
+ type: object
+ kafka:
+ description: Kafka configuration, such as the address and topic, to send enriched flows to.
+ properties:
+ address:
+ default: ''
+ description: Address of the Kafka server
+ type: string
+ sasl:
+ description: SASL authentication configuration. [Unsupported (*)].
+ properties:
+ clientIDReference:
+ description: Reference to the secret or config map containing the client ID
+ properties:
+ file:
+ description: File name within the config map or secret
+ type: string
+ name:
+ description: Name of the config map or secret containing the file
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the file reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ clientSecretReference:
+ description: Reference to the secret or config map containing the client secret
+ properties:
+ file:
+ description: File name within the config map or secret
+ type: string
+ name:
+ description: Name of the config map or secret containing the file
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the file reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type:
+ default: DISABLED
+ description: Type of SASL authentication to use, or `DISABLED` if SASL is not used
+ enum:
+ - DISABLED
+ - PLAIN
+ - SCRAM-SHA512
+ type: string
+ type: object
+ tls:
+ description: TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093.
+ properties:
+ caCert:
+ description: '`caCert` defines the reference of the certificate for the Certificate Authority'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ enable:
+ default: false
+ description: Enable TLS
+ type: boolean
+ insecureSkipVerify:
+ default: false
+ description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.'
+ type: boolean
+ userCert:
+ description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type: object
+ topic:
+ default: ''
+ description: Kafka topic to use. It must exist. NetObserv does not create it.
+ type: string
+ required:
+ - address
+ - topic
+ type: object
+ type:
+ description: '`type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`.'
+ enum:
+ - KAFKA
+ - IPFIX
+ type: string
+ required:
+ - type
+ type: object
+ type: array
+ kafka:
+ description: Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `KAFKA`.
+ properties:
+ address:
+ default: ''
+ description: Address of the Kafka server
+ type: string
+ sasl:
+ description: SASL authentication configuration. [Unsupported (*)].
+ properties:
+ clientIDReference:
+ description: Reference to the secret or config map containing the client ID
+ properties:
+ file:
+ description: File name within the config map or secret
+ type: string
+ name:
+ description: Name of the config map or secret containing the file
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the file reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ clientSecretReference:
+ description: Reference to the secret or config map containing the client secret
+ properties:
+ file:
+ description: File name within the config map or secret
+ type: string
+ name:
+ description: Name of the config map or secret containing the file
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the file reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type:
+ default: DISABLED
+ description: Type of SASL authentication to use, or `DISABLED` if SASL is not used
+ enum:
+ - DISABLED
+ - PLAIN
+ - SCRAM-SHA512
+ type: string
+ type: object
+ tls:
+ description: TLS client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093.
+ properties:
+ caCert:
+ description: '`caCert` defines the reference of the certificate for the Certificate Authority'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ enable:
+ default: false
+ description: Enable TLS
+ type: boolean
+ insecureSkipVerify:
+ default: false
+ description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.'
+ type: boolean
+ userCert:
+ description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type: object
+ topic:
+ default: ''
+ description: Kafka topic to use. It must exist. NetObserv does not create it.
+ type: string
+ required:
+ - address
+ - topic
+ type: object
+ loki:
+ description: '`loki`, the flow store, client settings.'
+ properties:
+ batchSize:
+ default: 102400
+ description: '`batchSize` is the maximum batch size (in bytes) of logs to accumulate before sending.'
+ format: int64
+ minimum: 1.0
+ type: integer
+ batchWait:
+ default: 1s
+ description: '`batchWait` is the maximum time to wait before sending a batch.'
+ type: string
+ enable:
+ default: true
+ description: Set `enable` to `true` to store flows in Loki. It is required for the OpenShift Console plugin installation.
+ type: boolean
+ lokiStack:
+ description: Loki configuration for "LokiStack" mode. This is useful for an easy loki-operator configuration. It is ignored for other modes.
+ properties:
+ name:
+ default: loki
+ description: Name of an existing LokiStack resource to use.
+ type: string
+ namespace:
+                    description: Namespace where this `LokiStack` resource is located. If omitted, it is assumed to be the same as `spec.namespace`.
+ type: string
+ type: object
+ manual:
+ description: Loki configuration for "Manual" mode. This is the most flexible configuration. It is ignored for other modes.
+ properties:
+ authToken:
+ default: DISABLED
+ description: '`authToken` describes the way to get a token to authenticate to Loki.
- `DISABLED` does not send any token with the request.
- `FORWARD` forwards the user token for authorization.
- `HOST` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set to `FORWARD`.'
+ enum:
+ - DISABLED
+ - HOST
+ - FORWARD
+ type: string
+ ingesterUrl:
+ default: http://loki:3100/
+ description: '`ingesterUrl` is the address of an existing Loki ingester service to push the flows to. When using the Loki Operator, set it to the Loki gateway service with the `network` tenant set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.'
+ type: string
+ querierUrl:
+ description: '`querierURL` specifies the address of the Loki querier service, in case it is different from the Loki ingester URL. If empty, the URL value is used (assuming that the Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway.'
+ type: string
+ statusTls:
+ description: TLS client configuration for Loki status URL.
+ properties:
+ caCert:
+ description: '`caCert` defines the reference of the certificate for the Certificate Authority'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ enable:
+ default: false
+ description: Enable TLS
+ type: boolean
+ insecureSkipVerify:
+ default: false
+ description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.'
+ type: boolean
+ userCert:
+ description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type: object
+ statusUrl:
+ description: '`statusURL` specifies the address of the Loki `/ready`, `/metrics` and `/config` endpoints, in case it is different from the Loki querier URL. If empty, the `querierURL` value is used. This is useful to show error messages and some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/. `statusTLS` configuration is used when `statusUrl` is set.'
+ type: string
+ tenantID:
+ default: netobserv
+ description: '`tenantID` is the Loki `X-Scope-OrgID` that identifies the tenant for each request. When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode.'
+ type: string
+ tls:
+ description: TLS client configuration for Loki URL.
+ properties:
+ caCert:
+ description: '`caCert` defines the reference of the certificate for the Certificate Authority'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ enable:
+ default: false
+ description: Enable TLS
+ type: boolean
+ insecureSkipVerify:
+ default: false
+ description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.'
+ type: boolean
+ userCert:
+ description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type: object
+ type: object
+ maxBackoff:
+ default: 5s
+ description: '`maxBackoff` is the maximum backoff time for client connection between retries.'
+ type: string
+ maxRetries:
+ default: 2
+ description: '`maxRetries` is the maximum number of retries for client connections.'
+ format: int32
+ minimum: 0.0
+ type: integer
+ microservices:
+ description: Loki configuration for "Microservices" mode. Use this option when Loki is installed using the microservices deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#microservices-mode). It is ignored for other modes.
+ properties:
+ ingesterUrl:
+ default: http://loki-distributor:3100/
+ description: '`ingesterUrl` is the address of an existing Loki ingester service to push the flows to.'
+ type: string
+ querierUrl:
+ default: http://loki-query-frontend:3100/
+ description: '`querierURL` specifies the address of the Loki querier service.'
+ type: string
+ tenantID:
+ default: netobserv
+ description: '`tenantID` is the Loki `X-Scope-OrgID` header that identifies the tenant for each request.'
+ type: string
+ tls:
+ description: TLS client configuration for Loki URL.
+ properties:
+ caCert:
+ description: '`caCert` defines the reference of the certificate for the Certificate Authority'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ enable:
+ default: false
+ description: Enable TLS
+ type: boolean
+ insecureSkipVerify:
+ default: false
+ description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.'
+ type: boolean
+ userCert:
+ description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type: object
+ type: object
+ minBackoff:
+ default: 1s
+ description: '`minBackoff` is the initial backoff time for client connection between retries.'
+ type: string
+ mode:
+ default: Monolithic
+ description: '`mode` must be set according to the installation mode of Loki:
- Use "LokiStack" when Loki is managed using the Loki Operator
- Use "Monolithic" when Loki is installed as a monolithic workload
- Use "Microservices" when Loki is installed as microservices, but without Loki Operator
- Use "Manual" if none of the options above match your setup
'
+ enum:
+ - Manual
+ - LokiStack
+ - Monolithic
+ - Microservices
+ type: string
+ monolithic:
+ description: Loki configuration for "Monolithic" mode. Use this option when Loki is installed using the monolithic deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#monolithic-mode). It is ignored for other modes.
+ properties:
+ tenantID:
+ default: netobserv
+ description: '`tenantID` is the Loki `X-Scope-OrgID` header that identifies the tenant for each request.'
+ type: string
+ tls:
+ description: TLS client configuration for Loki URL.
+ properties:
+ caCert:
+ description: '`caCert` defines the reference of the certificate for the Certificate Authority'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ enable:
+ default: false
+ description: Enable TLS
+ type: boolean
+ insecureSkipVerify:
+ default: false
+ description: '`insecureSkipVerify` allows skipping client-side verification of the server certificate. If set to `true`, the `caCert` field is ignored.'
+ type: boolean
+ userCert:
+ description: '`userCert` defines the user certificate reference and is used for mTLS (you can ignore it when using one-way TLS)'
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type: object
+ url:
+ default: http://loki:3100/
+ description: '`url` is the unique address of an existing Loki service that points to both the ingester and the querier.'
+ type: string
+ type: object
+ staticLabels:
+ additionalProperties:
+ type: string
+ default:
+ app: netobserv-flowcollector
+ description: '`staticLabels` is a map of common labels to set on each flow.'
+ type: object
+ timeout:
+ default: 10s
+                description: '`timeout` is the maximum connection / request time limit. A timeout of zero means no timeout.'
+ type: string
+ type: object
+ namespace:
+ default: netobserv
+ description: Namespace where NetObserv pods are deployed.
+ type: string
+ processor:
+ description: '`processor` defines the settings of the component that receives the flows from the agent, enriches them, generates metrics, and forwards them to the Loki persistence layer and/or any available exporter.'
+ properties:
+ clusterName:
+ default: ''
+ description: '`clusterName` is the name of the cluster to appear in the flows data. This is useful in a multi-cluster context. When using OpenShift, leave empty to make it automatically determined.'
+ type: string
+ conversationEndTimeout:
+ default: 10s
+ description: '`conversationEndTimeout` is the time to wait after a network flow is received, to consider the conversation ended. This delay is ignored when a FIN packet is collected for TCP flows (see `conversationTerminatingTimeout` instead).'
+ type: string
+ conversationHeartbeatInterval:
+ default: 30s
+ description: '`conversationHeartbeatInterval` is the time to wait between "tick" events of a conversation'
+ type: string
+ conversationTerminatingTimeout:
+ default: 5s
+ description: '`conversationTerminatingTimeout` is the time to wait from detected FIN flag to end a conversation. Only relevant for TCP flows.'
+ type: string
+ debug:
+ description: '`debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk.'
+ properties:
+ env:
+ additionalProperties:
+ type: string
+ description: '`env` allows passing custom environment variables to underlying components. Useful for passing some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios.'
+ type: object
+ type: object
+ dropUnusedFields:
+ default: true
+ description: '`dropUnusedFields` allows, when set to `true`, to drop fields that are known to be unused by OVS, to save storage space.'
+ type: boolean
+ enableKubeProbes:
+ default: true
+ description: '`enableKubeProbes` is a flag to enable or disable Kubernetes liveness and readiness probes'
+ type: boolean
+ healthPort:
+ default: 8080
+ description: '`healthPort` is a collector HTTP port in the Pod that exposes the health check API'
+ format: int32
+ maximum: 65535.0
+ minimum: 1.0
+ type: integer
+ imagePullPolicy:
+ default: IfNotPresent
+ description: '`imagePullPolicy` is the Kubernetes pull policy for the image defined above'
+ enum:
+ - IfNotPresent
+ - Always
+ - Never
+ type: string
+ kafkaConsumerAutoscaler:
+ description: '`kafkaConsumerAutoscaler` is the spec of a horizontal pod autoscaler to set up for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled.'
+ properties:
+ maxReplicas:
+ default: 3
+ description: '`maxReplicas` is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.'
+ format: int32
+ type: integer
+ metrics:
+ description: Metrics used by the pod autoscaler
+ items:
+ description: MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).
+ properties:
+ containerResource:
+ description: containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
+ properties:
+ container:
+ description: container is the name of the container in the pods of the scaling target
+ type: string
+ name:
+ description: name is the name of the resource in question.
+ type: string
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - container
+ - name
+ - target
+ type: object
+ external:
+ description: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
+ properties:
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - metric
+ - target
+ type: object
+ object:
+ description: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
+ properties:
+ describedObject:
+ description: describedObject specifies the descriptions of a object,such as kind,name apiVersion
+ properties:
+ apiVersion:
+ description: apiVersion is the API version of the referent
+ type: string
+ kind:
+ description: 'kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - describedObject
+ - metric
+ - target
+ type: object
+ pods:
+ description: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
+ properties:
+ metric:
+ description: metric identifies the target metric by name and selector
+ properties:
+ name:
+ description: name is the name of the given metric
+ type: string
+ selector:
+ description: selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - metric
+ - target
+ type: object
+ resource:
+ description: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+ properties:
+ name:
+ description: name is the name of the resource in question.
+ type: string
+ target:
+ description: target specifies the target value for the given metric
+ properties:
+ averageUtilization:
+ description: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+ format: int32
+ type: integer
+ averageValue:
+ anyOf:
+ - type: integer
+ - type: string
+ description: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type:
+ description: type represents whether the metric type is Utilization, Value, or AverageValue
+ type: string
+ value:
+ anyOf:
+ - type: integer
+ - type: string
+ description: value is the target value of the metric (as a quantity).
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ required:
+ - type
+ type: object
+ required:
+ - name
+ - target
+ type: object
+ type:
+ description: 'type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled'
+ type: string
+ required:
+ - type
+ type: object
+ type: array
+ minReplicas:
+ description: '`minReplicas` is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.'
+ format: int32
+ type: integer
+ status:
+ default: DISABLED
+ description: '`status` describes the desired status regarding deploying an horizontal pod autoscaler.
+        - `DISABLED` does not deploy an horizontal pod autoscaler.
+        - `ENABLED` deploys an horizontal pod autoscaler.
+        '
+ enum:
+ - DISABLED
+ - ENABLED
+ type: string
+ type: object
+ kafkaConsumerBatchSize:
+ default: 10485760
+ description: '`kafkaConsumerBatchSize` indicates to the broker the maximum batch size, in bytes, that the consumer accepts. Ignored when not using Kafka. Default: 10MB.'
+ type: integer
+ kafkaConsumerQueueCapacity:
+ default: 1000
+ description: '`kafkaConsumerQueueCapacity` defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka.'
+ type: integer
+ kafkaConsumerReplicas:
+ default: 3
+ description: '`kafkaConsumerReplicas` defines the number of replicas (pods) to start for `flowlogs-pipeline-transformer`, which consumes Kafka messages. This setting is ignored when Kafka is disabled.'
+ format: int32
+ minimum: 0.0
+ type: integer
+ logLevel:
+ default: info
+ description: '`logLevel` of the processor runtime'
+ enum:
+ - trace
+ - debug
+ - info
+ - warn
+ - error
+ - fatal
+ - panic
+ type: string
+ logTypes:
+ default: FLOWS
+ description: '`logTypes` defines the desired record types to generate. Possible values are:
+        - `FLOWS` (default) to export regular network flows
+        - `CONVERSATIONS` to generate events for started conversations, ended conversations as well as periodic "tick" updates
+        - `ENDED_CONVERSATIONS` to generate only ended conversations events
+        - `ALL` to generate both network flows and all conversations events
+        '
+ enum:
+ - FLOWS
+ - CONVERSATIONS
+ - ENDED_CONVERSATIONS
+ - ALL
+ type: string
+ metrics:
+ description: '`Metrics` define the processor configuration regarding metrics'
+ properties:
+ disableAlerts:
+ description: '`disableAlerts` is a list of alerts that should be disabled. Possible values are:
+        `NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.
+        `NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors.
+        '
+ items:
+ description: Name of a processor alert. Possible values are:
+            - `NetObservNoFlows`, which is triggered when no flows are being observed for a certain period.
+            - `NetObservLokiError`, which is triggered when flows are being dropped due to Loki errors.
+ enum:
+ - NetObservNoFlows
+ - NetObservLokiError
+ type: string
+ type: array
+ ignoreTags:
+ default:
+ - egress
+ - packets
+ - nodes-flows
+ - namespaces-flows
+ - workloads-flows
+ - namespaces
+ description: '`ignoreTags` is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: `egress`, `ingress`, `flows`, `bytes`, `packets`, `namespaces`, `nodes`, `workloads`, `nodes-flows`, `namespaces-flows`, `workloads-flows`. Namespace-based metrics are covered by both `workloads` and `namespaces` tags, hence it is recommended to always ignore one of them (`workloads` offering a finer granularity).'
+ items:
+ type: string
+ type: array
+ server:
+ description: Metrics server endpoint configuration for Prometheus scraper
+ properties:
+ port:
+ default: 9102
+ description: The prometheus HTTP port
+ format: int32
+ maximum: 65535.0
+ minimum: 1.0
+ type: integer
+ tls:
+ description: TLS configuration.
+ properties:
+ insecureSkipVerify:
+ default: false
+ description: '`insecureSkipVerify` allows skipping client-side verification of the provided certificate. If set to `true`, the `providedCaFile` field is ignored.'
+ type: boolean
+ provided:
+ description: TLS configuration when `type` is set to `PROVIDED`.
+ properties:
+ certFile:
+ description: '`certFile` defines the path to the certificate file name within the config map or secret'
+ type: string
+ certKey:
+ description: '`certKey` defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.'
+ type: string
+ name:
+ description: Name of the config map or secret containing certificates
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing certificates. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the certificate reference: `configmap` or `secret`'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ providedCaFile:
+ description: Reference to the CA file when `type` is set to `PROVIDED`.
+ properties:
+ file:
+ description: File name within the config map or secret
+ type: string
+ name:
+ description: Name of the config map or secret containing the file
+ type: string
+ namespace:
+ default: ''
+ description: Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+ type: string
+ type:
+ description: 'Type for the file reference: "configmap" or "secret"'
+ enum:
+ - configmap
+ - secret
+ type: string
+ type: object
+ type:
+ default: DISABLED
+ description: Select the type of TLS configuration:
+            - `DISABLED` (default) to not configure TLS for the endpoint. - `PROVIDED` to manually provide cert file and a key file. - `AUTO` to use OpenShift auto generated certificate using annotations.
+ enum:
+ - DISABLED
+ - PROVIDED
+ - AUTO
+ type: string
+ type: object
+ type: object
+ type: object
+ port:
+ default: 2055
+ description: Port of the flow collector (host port). By convention, some values are forbidden. It must be greater than 1024 and different from 4500, 4789 and 6081.
+ format: int32
+ maximum: 65535.0
+ minimum: 1025.0
+ type: integer
+ profilePort:
+ description: '`profilePort` allows setting up a Go pprof profiler listening to this port'
+ format: int32
+ maximum: 65535.0
+ minimum: 0.0
+ type: integer
+ resources:
+ default:
+ limits:
+ memory: 800Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ description: '`resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ properties:
+ claims:
+ description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers."
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ type: object
+ type: object
+ status:
+ description: '`FlowCollectorStatus` defines the observed state of FlowCollector'
+ properties:
+ conditions:
+ description: '`conditions` represent the latest available observations of an object''s state'
+ items:
+ description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n \ttype FooStatus struct{ \t // Represents the observations of a foo's current state. \t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" \t // +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map \t // +listMapKey=type \t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields \t}"
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: message is a human readable message indicating details about the transition. This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
+ format: int64
+ minimum: 0.0
+ type: integer
+ reason:
+ description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - 'True'
+ - 'False'
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ namespace:
+ description: Namespace where console plugin and flowlogs-pipeline have been deployed.
+ type: string
+ required:
+ - conditions
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ''
+ plural: ''
+ conditions: []
+ storedVersions: []
diff --git a/kube-custom-resources-rs/Cargo.toml b/kube-custom-resources-rs/Cargo.toml
index 74f41461a..5862d2f47 100644
--- a/kube-custom-resources-rs/Cargo.toml
+++ b/kube-custom-resources-rs/Cargo.toml
@@ -128,6 +128,9 @@ externaldns_nginx_org_v1 = []
flagger_app_v1beta1 = []
flink_apache_org_v1beta1 = []
flow_volcano_sh_v1alpha1 = []
+flows_netobserv_io_v1alpha1 = []
+flows_netobserv_io_v1beta1 = []
+flows_netobserv_io_v1beta2 = []
flux_framework_org_v1alpha1 = []
gateway_networking_k8s_io_v1 = []
gateway_networking_k8s_io_v1alpha2 = []
diff --git a/kube-custom-resources-rs/src/flows_netobserv_io_v1alpha1/flowcollectors.rs b/kube-custom-resources-rs/src/flows_netobserv_io_v1alpha1/flowcollectors.rs
new file mode 100644
index 000000000..64b132c73
--- /dev/null
+++ b/kube-custom-resources-rs/src/flows_netobserv_io_v1alpha1/flowcollectors.rs
@@ -0,0 +1,1637 @@
+// WARNING: generated by kopium - manual changes will be overwritten
+// kopium command: kopium --docs --filename ./crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1alpha1/flowcollectors.yaml
+// kopium version: 0.16.1
+
+use kube::CustomResource;
+use serde::{Serialize, Deserialize};
+use std::collections::BTreeMap;
+use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString;
+
+/// FlowCollectorSpec defines the desired state of FlowCollector
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug)]
+#[kube(group = "flows.netobserv.io", version = "v1alpha1", kind = "FlowCollector", plural = "flowcollectors")]
+#[kube(status = "FlowCollectorStatus")]
+#[kube(schema = "disabled")]
+pub struct FlowCollectorSpec {
+    /// agent for flows extraction.
+    pub agent: FlowCollectorAgent,
+    /// consolePlugin defines the settings related to the OpenShift Console plugin, when available.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "consolePlugin")]
+    pub console_plugin: Option<FlowCollectorConsolePlugin>,
+    /// deploymentModel defines the desired type of deployment for flow processing. Possible values are "DIRECT" (default) to make the flow processor listening directly from the agents, or "KAFKA" to make flows sent to a Kafka pipeline before consumption by the processor. Kafka can provide better scalability, resiliency and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).
+    #[serde(rename = "deploymentModel")]
+    pub deployment_model: FlowCollectorDeploymentModel,
+    /// exporters defines additional optional exporters for custom consumption or storage. This is an experimental feature. Currently, only KAFKA exporter is available.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub exporters: Option<Vec<FlowCollectorExporters>>,
+    /// kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the "spec.deploymentModel" is "KAFKA".
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub kafka: Option<FlowCollectorKafka>,
+    /// loki, the flow store, client settings.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub loki: Option<FlowCollectorLoki>,
+    /// namespace where NetObserv pods are deployed. If empty, the namespace of the operator is going to be used.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+    /// processor defines the settings of the component that receives the flows from the agent, enriches them, and forwards them to the Loki persistence layer.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub processor: Option<FlowCollectorProcessor>,
+}
+
+/// agent for flows extraction.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorAgent {
+    /// ebpf describes the settings related to the eBPF-based flow reporter when the "agent.type" property is set to "EBPF".
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub ebpf: Option<FlowCollectorAgentEbpf>,
+    /// ipfix describes the settings related to the IPFIX-based flow reporter when the "agent.type" property is set to "IPFIX".
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub ipfix: Option<FlowCollectorAgentIpfix>,
+    /// type selects the flows tracing agent. Possible values are "EBPF" (default) to use NetObserv eBPF agent, "IPFIX" to use the legacy IPFIX collector. "EBPF" is recommended in most cases as it offers better performances and should work regardless of the CNI installed on the cluster. "IPFIX" works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).
+    #[serde(rename = "type")]
+    pub r#type: FlowCollectorAgentType,
+}
+
+/// ebpf describes the settings related to the eBPF-based flow reporter when the "agent.type" property is set to "EBPF".
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorAgentEbpf {
+    /// cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "cacheActiveTimeout")]
+    pub cache_active_timeout: Option<String>,
+    /// cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "cacheMaxFlows")]
+    pub cache_max_flows: Option<i32>,
+    /// Debug allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub debug: Option<FlowCollectorAgentEbpfDebug>,
+    /// excludeInterfaces contains the interface names that will be excluded from flow tracing. If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, otherwise it will be matched as a case-sensitive string.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "excludeInterfaces")]
+    pub exclude_interfaces: Option<Vec<String>>,
+    /// imagePullPolicy is the Kubernetes pull policy for the image defined above
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "imagePullPolicy")]
+    pub image_pull_policy: Option<FlowCollectorAgentEbpfImagePullPolicy>,
+    /// interfaces contains the interface names from where flows will be collected. If empty, the agent will fetch all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, otherwise it will be matched as a case-sensitive string.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub interfaces: Option<Vec<String>>,
+    /// kafkaBatchSize limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "kafkaBatchSize")]
+    pub kafka_batch_size: Option<i64>,
+    /// logLevel defines the log level for the NetObserv eBPF Agent
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")]
+    pub log_level: Option<FlowCollectorAgentEbpfLogLevel>,
+    /// privileged mode for the eBPF Agent container. In general this setting can be ignored or set to false: in that case, the operator will set granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container, to enable its correct operation. If for some reason these capabilities cannot be set (for example old kernel version not knowing CAP_BPF) then you can turn on this mode for more global privileges.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub privileged: Option<bool>,
+    /// resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub resources: Option<FlowCollectorAgentEbpfResources>,
+    /// sampling rate of the flow reporter. 100 means one flow on 100 is sent. 0 or 1 means all flows are sampled.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub sampling: Option<i32>,
+}
+
+/// Debug allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorAgentEbpfDebug {
+    /// env allows passing custom environment variables to the NetObserv Agent. Useful for passing some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug and support scenarios.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub env: Option<BTreeMap<String, String>>,
+}
+
+/// imagePullPolicy values for the eBPF agent container image (standard Kubernetes pull policies).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorAgentEbpfImagePullPolicy {
+ IfNotPresent,
+ Always,
+ Never,
+}
+
+/// logLevel values for the NetObserv eBPF Agent; serialized lowercase to match the CRD enum.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorAgentEbpfLogLevel {
+ #[serde(rename = "trace")]
+ Trace,
+ #[serde(rename = "debug")]
+ Debug,
+ #[serde(rename = "info")]
+ Info,
+ #[serde(rename = "warn")]
+ Warn,
+ #[serde(rename = "error")]
+ Error,
+ #[serde(rename = "fatal")]
+ Fatal,
+ #[serde(rename = "panic")]
+ Panic,
+}
+
+/// resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorAgentEbpfResources {
+    /// Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.
+    /// This is an alpha field and requires enabling the DynamicResourceAllocation feature gate.
+    /// This field is immutable. It can only be set for containers.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub claims: Option<Vec<FlowCollectorAgentEbpfResourcesClaims>>,
+    /// Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub limits: Option<BTreeMap<String, IntOrString>>,
+    /// Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub requests: Option<BTreeMap<String, IntOrString>>,
+}
+
+/// ResourceClaim references one entry in PodSpec.ResourceClaims by name, making that resource available inside the container.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorAgentEbpfResourcesClaims {
+ /// Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
+ pub name: String,
+}
+
+/// ipfix describes the settings related to the IPFIX-based flow reporter when the "agent.type" property is set to "IPFIX".
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorAgentIpfix {
+    /// cacheActiveTimeout is the max period during which the reporter will aggregate flows before sending
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "cacheActiveTimeout")]
+    pub cache_active_timeout: Option<String>,
+    /// cacheMaxFlows is the max number of flows in an aggregate; when reached, the reporter sends the flows
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "cacheMaxFlows")]
+    pub cache_max_flows: Option<i32>,
+    /// clusterNetworkOperator defines the settings related to the OpenShift Cluster Network Operator, when available.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "clusterNetworkOperator")]
+    pub cluster_network_operator: Option<FlowCollectorAgentIpfixClusterNetworkOperator>,
+    /// forceSampleAll allows disabling sampling in the IPFIX-based flow reporter. It is not recommended to sample all the traffic with IPFIX, as it might generate cluster instability. If you REALLY want to do that, set this flag to true. Use at your own risk. When it is set to true, the value of "sampling" is ignored.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "forceSampleAll")]
+    pub force_sample_all: Option<bool>,
+    /// ovnKubernetes defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "ovnKubernetes")]
+    pub ovn_kubernetes: Option<FlowCollectorAgentIpfixOvnKubernetes>,
+    /// sampling is the sampling rate on the reporter. 100 means one flow on 100 is sent. To ensure cluster stability, it is not possible to set a value below 2. If you really want to sample every packet, which might impact the cluster stability, refer to "forceSampleAll". Alternatively, you can use the eBPF Agent instead of IPFIX.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub sampling: Option<i32>,
+}
+
+/// clusterNetworkOperator defines the settings related to the OpenShift Cluster Network Operator, when available.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorAgentIpfixClusterNetworkOperator {
+    /// namespace where the config map is going to be deployed.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+}
+
+/// ovnKubernetes defines the settings of the OVN-Kubernetes CNI, when available. This configuration is used when using OVN's IPFIX exports, without OpenShift. When using OpenShift, refer to the `clusterNetworkOperator` property instead.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorAgentIpfixOvnKubernetes {
+    /// containerName defines the name of the container to configure for IPFIX.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "containerName")]
+    pub container_name: Option<String>,
+    /// daemonSetName defines the name of the DaemonSet controlling the OVN-Kubernetes pods.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "daemonSetName")]
+    pub daemon_set_name: Option<String>,
+    /// namespace where OVN-Kubernetes pods are deployed.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+}
+
+/// Flow tracing agent type: "EBPF" (NetObserv eBPF agent, the default) or "IPFIX" (legacy IPFIX collector).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorAgentType {
+ #[serde(rename = "EBPF")]
+ Ebpf,
+ #[serde(rename = "IPFIX")]
+ Ipfix,
+}
+
+/// consolePlugin defines the settings related to the OpenShift Console plugin, when available.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePlugin {
+    /// autoscaler spec of a horizontal pod autoscaler to set up for the plugin Deployment.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub autoscaler: Option<FlowCollectorConsolePluginAutoscaler>,
+    /// imagePullPolicy is the Kubernetes pull policy for the image defined above
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "imagePullPolicy")]
+    pub image_pull_policy: Option<FlowCollectorConsolePluginImagePullPolicy>,
+    /// logLevel for the console plugin backend
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")]
+    pub log_level: Option<FlowCollectorConsolePluginLogLevel>,
+    /// port is the plugin service port
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub port: Option<i32>,
+    /// portNaming defines the configuration of the port-to-service name translation
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "portNaming")]
+    pub port_naming: Option<FlowCollectorConsolePluginPortNaming>,
+    /// quickFilters configures quick filter presets for the Console plugin
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "quickFilters")]
+    pub quick_filters: Option<Vec<FlowCollectorConsolePluginQuickFilters>>,
+    /// register allows, when set to true, to automatically register the provided console plugin with the OpenShift Console operator. When set to false, you can still register it manually by editing console.operator.openshift.io/cluster. E.g: oc patch console.operator.openshift.io cluster --type='json' -p '[{"op": "add", "path": "/spec/plugins/-", "value": "netobserv-plugin"}]'
+    pub register: bool,
+    /// replicas defines the number of replicas (pods) to start.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub replicas: Option<i32>,
+    /// resources, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub resources: Option<FlowCollectorConsolePluginResources>,
+}
+
+/// autoscaler spec of a horizontal pod autoscaler to set up for the plugin Deployment.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscaler {
+    /// maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxReplicas")]
+    pub max_replicas: Option<i32>,
+    /// metrics used by the pod autoscaler
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub metrics: Option<Vec<FlowCollectorConsolePluginAutoscalerMetrics>>,
+    /// minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "minReplicas")]
+    pub min_replicas: Option<i32>,
+    /// Status describe the desired status regarding deploying an horizontal pod autoscaler DISABLED will not deploy an horizontal pod autoscaler ENABLED will deploy an horizontal pod autoscaler
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub status: Option<FlowCollectorConsolePluginAutoscalerStatus>,
+}
+
+/// MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetrics {
+    /// containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "containerResource")]
+    pub container_resource: Option<FlowCollectorConsolePluginAutoscalerMetricsContainerResource>,
+    /// external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub external: Option<FlowCollectorConsolePluginAutoscalerMetricsExternal>,
+    /// object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub object: Option<FlowCollectorConsolePluginAutoscalerMetricsObject>,
+    /// pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub pods: Option<FlowCollectorConsolePluginAutoscalerMetricsPods>,
+    /// resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub resource: Option<FlowCollectorConsolePluginAutoscalerMetricsResource>,
+    /// type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled
+    #[serde(rename = "type")]
+    pub r#type: String,
+}
+
+/// containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsContainerResource {
+ /// container is the name of the container in the pods of the scaling target
+ pub container: String,
+ /// name is the name of the resource in question.
+ pub name: String,
+ /// target specifies the target value (value, average value, or average utilization) for the given metric
+ pub target: FlowCollectorConsolePluginAutoscalerMetricsContainerResourceTarget,
+}
+
+/// target specifies the target value for the given metric
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsContainerResourceTarget {
+    /// averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageUtilization")]
+    pub average_utilization: Option<i32>,
+    /// averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageValue")]
+    pub average_value: Option<IntOrString>,
+    /// type represents whether the metric type is Utilization, Value, or AverageValue
+    #[serde(rename = "type")]
+    pub r#type: String,
+    /// value is the target value of the metric (as a quantity).
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub value: Option<IntOrString>,
+}
+
+/// external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsExternal {
+ /// metric identifies the target metric by name and selector
+ pub metric: FlowCollectorConsolePluginAutoscalerMetricsExternalMetric,
+ /// target specifies the target value (value, average value, or average utilization) for the given metric
+ pub target: FlowCollectorConsolePluginAutoscalerMetricsExternalTarget,
+}
+
+/// metric identifies the target metric by name and selector
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsExternalMetric {
+    /// name is the name of the given metric
+    pub name: String,
+    /// selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub selector: Option<FlowCollectorConsolePluginAutoscalerMetricsExternalMetricSelector>,
+}
+
+/// selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsExternalMetricSelector {
+    /// matchExpressions is a list of label selector requirements. The requirements are ANDed.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")]
+    pub match_expressions: Option<Vec<FlowCollectorConsolePluginAutoscalerMetricsExternalMetricSelectorMatchExpressions>>,
+    /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")]
+    pub match_labels: Option<BTreeMap<String, String>>,
+}
+
+/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsExternalMetricSelectorMatchExpressions {
+    /// key is the label key that the selector applies to.
+    pub key: String,
+    /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+    pub operator: String,
+    /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub values: Option<Vec<String>>,
+}
+
+/// target specifies the target value for the given metric
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsExternalTarget {
+    /// averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageUtilization")]
+    pub average_utilization: Option<i32>,
+    /// averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageValue")]
+    pub average_value: Option<IntOrString>,
+    /// type represents whether the metric type is Utilization, Value, or AverageValue
+    #[serde(rename = "type")]
+    pub r#type: String,
+    /// value is the target value of the metric (as a quantity).
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub value: Option<IntOrString>,
+}
+
+/// object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsObject {
+ /// describedObject specifies the descriptions of the referenced object: kind, name, and apiVersion
+ #[serde(rename = "describedObject")]
+ pub described_object: FlowCollectorConsolePluginAutoscalerMetricsObjectDescribedObject,
+ /// metric identifies the target metric by name and selector
+ pub metric: FlowCollectorConsolePluginAutoscalerMetricsObjectMetric,
+ /// target specifies the target value (value, average value, or average utilization) for the given metric
+ pub target: FlowCollectorConsolePluginAutoscalerMetricsObjectTarget,
+}
+
+/// describedObject specifies the descriptions of a object,such as kind,name apiVersion
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsObjectDescribedObject {
+    /// apiVersion is the API version of the referent
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")]
+    pub api_version: Option<String>,
+    /// kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+    pub kind: String,
+    /// name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+    pub name: String,
+}
+
+/// metric identifies the target metric by name and selector
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsObjectMetric {
+    /// name is the name of the given metric
+    pub name: String,
+    /// selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub selector: Option<FlowCollectorConsolePluginAutoscalerMetricsObjectMetricSelector>,
+}
+
+/// selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsObjectMetricSelector {
+    /// matchExpressions is a list of label selector requirements. The requirements are ANDed.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")]
+    pub match_expressions: Option<Vec<FlowCollectorConsolePluginAutoscalerMetricsObjectMetricSelectorMatchExpressions>>,
+    /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")]
+    pub match_labels: Option<BTreeMap<String, String>>,
+}
+
+/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsObjectMetricSelectorMatchExpressions {
+    /// key is the label key that the selector applies to.
+    pub key: String,
+    /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+    pub operator: String,
+    /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub values: Option<Vec<String>>,
+}
+
+/// target specifies the target value for the given metric
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsObjectTarget {
+    /// averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageUtilization")]
+    pub average_utilization: Option<i32>,
+    /// averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageValue")]
+    pub average_value: Option<IntOrString>,
+    /// type represents whether the metric type is Utilization, Value, or AverageValue
+    #[serde(rename = "type")]
+    pub r#type: String,
+    /// value is the target value of the metric (as a quantity).
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub value: Option<IntOrString>,
+}
+
+/// pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsPods {
+ /// metric identifies the target metric by name and selector
+ pub metric: FlowCollectorConsolePluginAutoscalerMetricsPodsMetric,
+ /// target specifies the target value (value, average value, or average utilization) for the given metric
+ pub target: FlowCollectorConsolePluginAutoscalerMetricsPodsTarget,
+}
+
+/// metric identifies the target metric by name and selector
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsPodsMetric {
+    /// name is the name of the given metric
+    pub name: String,
+    /// selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub selector: Option<FlowCollectorConsolePluginAutoscalerMetricsPodsMetricSelector>,
+}
+
+/// selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsPodsMetricSelector {
+    /// matchExpressions is a list of label selector requirements. The requirements are ANDed.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")]
+    pub match_expressions: Option<Vec<FlowCollectorConsolePluginAutoscalerMetricsPodsMetricSelectorMatchExpressions>>,
+    /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")]
+    pub match_labels: Option<BTreeMap<String, String>>,
+}
+
+/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsPodsMetricSelectorMatchExpressions {
+    /// key is the label key that the selector applies to.
+    pub key: String,
+    /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+    pub operator: String,
+    /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub values: Option<Vec<String>>,
+}
+
+/// target specifies the target value for the given metric
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsPodsTarget {
+    /// averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageUtilization")]
+    pub average_utilization: Option<i32>,
+    /// averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageValue")]
+    pub average_value: Option<IntOrString>,
+    /// type represents whether the metric type is Utilization, Value, or AverageValue
+    #[serde(rename = "type")]
+    pub r#type: String,
+    /// value is the target value of the metric (as a quantity).
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub value: Option<IntOrString>,
+}
+
+/// resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsResource {
+    // Both fields are required (no serde default on either).
+    /// name is the name of the resource in question.
+    pub name: String,
+    /// target specifies the target value for the given metric
+    pub target: FlowCollectorConsolePluginAutoscalerMetricsResourceTarget,
+}
+
+/// target specifies the target value for the given metric
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginAutoscalerMetricsResourceTarget {
+    /// averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageUtilization")]
+    pub average_utilization: Option<i32>,
+    /// averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageValue")]
+    pub average_value: Option<IntOrString>,
+    /// type represents whether the metric type is Utilization, Value, or AverageValue
+    #[serde(rename = "type")]
+    pub r#type: String,
+    /// value is the target value of the metric (as a quantity).
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub value: Option<IntOrString>,
+}
+
+/// Whether the horizontal pod autoscaler for the console plugin Deployment is
+/// `DISABLED` or `ENABLED` (serialized upper-case via serde renames).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorConsolePluginAutoscalerStatus {
+    #[serde(rename = "DISABLED")]
+    Disabled,
+    #[serde(rename = "ENABLED")]
+    Enabled,
+}
+
+/// Image pull policy for the console plugin: the three standard Kubernetes
+/// values `IfNotPresent`, `Always`, `Never` (serialized as-is, no renames).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorConsolePluginImagePullPolicy {
+    IfNotPresent,
+    Always,
+    Never,
+}
+
+/// Log verbosity level accepted by the console plugin, serialized in
+/// lowercase (`trace` .. `panic`) via serde renames.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorConsolePluginLogLevel {
+    #[serde(rename = "trace")]
+    Trace,
+    #[serde(rename = "debug")]
+    Debug,
+    #[serde(rename = "info")]
+    Info,
+    #[serde(rename = "warn")]
+    Warn,
+    #[serde(rename = "error")]
+    Error,
+    #[serde(rename = "fatal")]
+    Fatal,
+    #[serde(rename = "panic")]
+    Panic,
+}
+
+/// portNaming defines the configuration of the port-to-service name translation
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginPortNaming {
+    /// enable the console plugin port-to-service name translation
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub enable: Option<bool>,
+    /// portNames defines additional port names to use in the console. Example: portNames: {"3100": "loki"}
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "portNames")]
+    pub port_names: Option<BTreeMap<String, String>>,
+}
+
+/// QuickFilter defines preset configuration for Console's quick filters
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginQuickFilters {
+    /// default defines whether this filter should be active by default or not
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub default: Option<bool>,
+    /// filter is a set of keys and values to be set when this filter is selected. Each key can relate to a list of values using a coma-separated string. Example: filter: {"src_namespace": "namespace1,namespace2"}
+    pub filter: BTreeMap<String, String>,
+    /// name of the filter, that will be displayed in Console
+    pub name: String,
+}
+
+/// resources, in terms of compute resources, required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginResources {
+    /// Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.
+    /// This is an alpha field and requires enabling the DynamicResourceAllocation feature gate.
+    /// This field is immutable. It can only be set for containers.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub claims: Option<Vec<FlowCollectorConsolePluginResourcesClaims>>,
+    /// Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub limits: Option<BTreeMap<String, IntOrString>>,
+    /// Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub requests: Option<BTreeMap<String, IntOrString>>,
+}
+
+/// ResourceClaim references one entry in PodSpec.ResourceClaims.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorConsolePluginResourcesClaims {
+    /// Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
+    pub name: String,
+}
+
+/// Pipeline topology selector (`spec.deploymentModel`): `DIRECT`, or `KAFKA`
+/// to route flows through a Kafka broker (see FlowCollectorKafka).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorDeploymentModel {
+    #[serde(rename = "DIRECT")]
+    Direct,
+    #[serde(rename = "KAFKA")]
+    Kafka,
+}
+
+/// FlowCollectorExporter defines an additional exporter to send enriched flows to
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorExporters {
+    /// IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub ipfix: Option<FlowCollectorExportersIpfix>,
+    /// kafka configuration, such as address or topic, to send enriched flows to.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub kafka: Option<FlowCollectorExportersKafka>,
+    /// `type` selects the type of exporters. The available options are `KAFKA` and `IPFIX`.
+    #[serde(rename = "type")]
+    pub r#type: FlowCollectorExportersType,
+}
+
+/// IPFIX configuration, such as the IP address and port to send enriched IPFIX flows to.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorExportersIpfix {
+    /// Address of the IPFIX external receiver
+    #[serde(rename = "targetHost")]
+    pub target_host: String,
+    /// Port for the IPFIX external receiver
+    #[serde(rename = "targetPort")]
+    pub target_port: i64,
+    /// Transport protocol (`TCP` or `UDP`) to be used for the IPFIX connection, defaults to `TCP`.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub transport: Option<FlowCollectorExportersIpfixTransport>,
+}
+
+/// Transport protocol for the IPFIX exporter connection: `TCP` or `UDP`
+/// (serialized upper-case via serde renames).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorExportersIpfixTransport {
+    #[serde(rename = "TCP")]
+    Tcp,
+    #[serde(rename = "UDP")]
+    Udp,
+}
+
+/// kafka configuration, such as address or topic, to send enriched flows to.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorExportersKafka {
+    /// address of the Kafka server
+    pub address: String,
+    /// SASL authentication configuration. [Unsupported (*)].
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub sasl: Option<FlowCollectorExportersKafkaSasl>,
+    /// tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged).
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub tls: Option<FlowCollectorExportersKafkaTls>,
+    /// kafka topic to use. It must exist, NetObserv will not create it.
+    pub topic: String,
+}
+
+/// SASL authentication configuration. [Unsupported (*)].
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorExportersKafkaSasl {
+    /// Reference to the secret or config map containing the client ID
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientIDReference")]
+    pub client_id_reference: Option<FlowCollectorExportersKafkaSaslClientIdReference>,
+    /// Reference to the secret or config map containing the client secret
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientSecretReference")]
+    pub client_secret_reference: Option<FlowCollectorExportersKafkaSaslClientSecretReference>,
+    /// Type of SASL authentication to use, or `DISABLED` if SASL is not used
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+    pub r#type: Option<FlowCollectorExportersKafkaSaslType>,
+}
+
+/// Reference to the secret or config map containing the client ID
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorExportersKafkaSaslClientIdReference {
+    /// File name within the config map or secret
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub file: Option<String>,
+    /// Name of the config map or secret containing the file
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    /// Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+    /// Type for the file reference: "configmap" or "secret"
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+    pub r#type: Option<FlowCollectorExportersKafkaSaslClientIdReferenceType>,
+}
+
+/// Kind of object the client-ID reference points to: a `configmap` or a
+/// `secret` (serialized lowercase via serde renames).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorExportersKafkaSaslClientIdReferenceType {
+    #[serde(rename = "configmap")]
+    Configmap,
+    #[serde(rename = "secret")]
+    Secret,
+}
+
+/// Reference to the secret or config map containing the client secret
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorExportersKafkaSaslClientSecretReference {
+    /// File name within the config map or secret
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub file: Option<String>,
+    /// Name of the config map or secret containing the file
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    /// Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+    /// Type for the file reference: "configmap" or "secret"
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+    pub r#type: Option<FlowCollectorExportersKafkaSaslClientSecretReferenceType>,
+}
+
+/// Kind of object the client-secret reference points to: a `configmap` or a
+/// `secret` (serialized lowercase via serde renames).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorExportersKafkaSaslClientSecretReferenceType {
+    #[serde(rename = "configmap")]
+    Configmap,
+    #[serde(rename = "secret")]
+    Secret,
+}
+
+/// SASL mechanism for the Kafka exporter: `DISABLED`, `PLAIN`, or
+/// `SCRAM-SHA512` (serialized via serde renames).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorExportersKafkaSaslType {
+    #[serde(rename = "DISABLED")]
+    Disabled,
+    #[serde(rename = "PLAIN")]
+    Plain,
+    #[serde(rename = "SCRAM-SHA512")]
+    ScramSha512,
+}
+
+/// tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorExportersKafkaTls {
+    /// caCert defines the reference of the certificate for the Certificate Authority
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "caCert")]
+    pub ca_cert: Option<FlowCollectorExportersKafkaTlsCaCert>,
+    /// enable TLS
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub enable: Option<bool>,
+    /// insecureSkipVerify allows skipping client-side verification of the server certificate If set to true, CACert field will be ignored
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "insecureSkipVerify")]
+    pub insecure_skip_verify: Option<bool>,
+    /// userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS)
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "userCert")]
+    pub user_cert: Option<FlowCollectorExportersKafkaTlsUserCert>,
+}
+
+/// caCert defines the reference of the certificate for the Certificate Authority
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorExportersKafkaTlsCaCert {
+    /// certFile defines the path to the certificate file name within the config map or secret
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "certFile")]
+    pub cert_file: Option<String>,
+    /// certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "certKey")]
+    pub cert_key: Option<String>,
+    /// name of the config map or secret containing certificates
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    /// namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+    /// type for the certificate reference: "configmap" or "secret"
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+    pub r#type: Option<FlowCollectorExportersKafkaTlsCaCertType>,
+}
+
+/// Kind of object the CA certificate reference points to: a `configmap` or a
+/// `secret` (serialized lowercase via serde renames).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorExportersKafkaTlsCaCertType {
+    #[serde(rename = "configmap")]
+    Configmap,
+    #[serde(rename = "secret")]
+    Secret,
+}
+
+/// userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS)
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorExportersKafkaTlsUserCert {
+    /// certFile defines the path to the certificate file name within the config map or secret
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "certFile")]
+    pub cert_file: Option<String>,
+    /// certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "certKey")]
+    pub cert_key: Option<String>,
+    /// name of the config map or secret containing certificates
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    /// namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+    /// type for the certificate reference: "configmap" or "secret"
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+    pub r#type: Option<FlowCollectorExportersKafkaTlsUserCertType>,
+}
+
+/// Kind of object the user certificate reference points to: a `configmap` or
+/// a `secret` (serialized lowercase via serde renames).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorExportersKafkaTlsUserCertType {
+    #[serde(rename = "configmap")]
+    Configmap,
+    #[serde(rename = "secret")]
+    Secret,
+}
+
+/// `type` discriminator for FlowCollectorExporters: `KAFKA` or `IPFIX`
+/// (serialized upper-case via serde renames).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorExportersType {
+    #[serde(rename = "KAFKA")]
+    Kafka,
+    #[serde(rename = "IPFIX")]
+    Ipfix,
+}
+
+/// kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the "spec.deploymentModel" is "KAFKA".
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorKafka {
+    /// address of the Kafka server
+    pub address: String,
+    /// SASL authentication configuration. [Unsupported (*)].
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub sasl: Option<FlowCollectorKafkaSasl>,
+    /// tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged).
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub tls: Option<FlowCollectorKafkaTls>,
+    /// kafka topic to use. It must exist, NetObserv will not create it.
+    pub topic: String,
+}
+
+/// SASL authentication configuration. [Unsupported (*)].
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorKafkaSasl {
+    /// Reference to the secret or config map containing the client ID
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientIDReference")]
+    pub client_id_reference: Option<FlowCollectorKafkaSaslClientIdReference>,
+    /// Reference to the secret or config map containing the client secret
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientSecretReference")]
+    pub client_secret_reference: Option<FlowCollectorKafkaSaslClientSecretReference>,
+    /// Type of SASL authentication to use, or `DISABLED` if SASL is not used
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+    pub r#type: Option<FlowCollectorKafkaSaslType>,
+}
+
+/// Reference to the secret or config map containing the client ID
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorKafkaSaslClientIdReference {
+    /// File name within the config map or secret
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub file: Option<String>,
+    /// Name of the config map or secret containing the file
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    /// Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+    /// Type for the file reference: "configmap" or "secret"
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+    pub r#type: Option<FlowCollectorKafkaSaslClientIdReferenceType>,
+}
+
+/// Kind of object the client-ID reference points to: a `configmap` or a
+/// `secret` (serialized lowercase via serde renames).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorKafkaSaslClientIdReferenceType {
+    #[serde(rename = "configmap")]
+    Configmap,
+    #[serde(rename = "secret")]
+    Secret,
+}
+
+/// Reference to the secret or config map containing the client secret
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorKafkaSaslClientSecretReference {
+    /// File name within the config map or secret
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub file: Option<String>,
+    /// Name of the config map or secret containing the file
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    /// Namespace of the config map or secret containing the file. If omitted, the default is to use the same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret is copied so that it can be mounted as required.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+    /// Type for the file reference: "configmap" or "secret"
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+    pub r#type: Option<FlowCollectorKafkaSaslClientSecretReferenceType>,
+}
+
+/// Kind of object the client-secret reference points to: a `configmap` or a
+/// `secret` (serialized lowercase via serde renames).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorKafkaSaslClientSecretReferenceType {
+    #[serde(rename = "configmap")]
+    Configmap,
+    #[serde(rename = "secret")]
+    Secret,
+}
+
+/// SASL mechanism for the Kafka pipeline broker: `DISABLED`, `PLAIN`, or
+/// `SCRAM-SHA512` (serialized via serde renames).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorKafkaSaslType {
+    #[serde(rename = "DISABLED")]
+    Disabled,
+    #[serde(rename = "PLAIN")]
+    Plain,
+    #[serde(rename = "SCRAM-SHA512")]
+    ScramSha512,
+}
+
+/// tls client configuration. When using TLS, verify that the address matches the Kafka port used for TLS, generally 9093. Note that, when eBPF agents are used, Kafka certificate needs to be copied in the agent namespace (by default it's netobserv-privileged).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorKafkaTls {
+    /// caCert defines the reference of the certificate for the Certificate Authority
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "caCert")]
+    pub ca_cert: Option<FlowCollectorKafkaTlsCaCert>,
+    /// enable TLS
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub enable: Option<bool>,
+    /// insecureSkipVerify allows skipping client-side verification of the server certificate If set to true, CACert field will be ignored
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "insecureSkipVerify")]
+    pub insecure_skip_verify: Option<bool>,
+    /// userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS)
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "userCert")]
+    pub user_cert: Option<FlowCollectorKafkaTlsUserCert>,
+}
+
+/// caCert defines the reference of the certificate for the Certificate Authority
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorKafkaTlsCaCert {
+    /// certFile defines the path to the certificate file name within the config map or secret
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "certFile")]
+    pub cert_file: Option<String>,
+    /// certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "certKey")]
+    pub cert_key: Option<String>,
+    /// name of the config map or secret containing certificates
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    /// namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+    /// type for the certificate reference: "configmap" or "secret"
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+    pub r#type: Option<FlowCollectorKafkaTlsCaCertType>,
+}
+
+/// Kind of object the CA certificate reference points to: a `configmap` or a
+/// `secret` (serialized lowercase via serde renames).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorKafkaTlsCaCertType {
+    #[serde(rename = "configmap")]
+    Configmap,
+    #[serde(rename = "secret")]
+    Secret,
+}
+
+/// userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS)
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorKafkaTlsUserCert {
+    /// certFile defines the path to the certificate file name within the config map or secret
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "certFile")]
+    pub cert_file: Option<String>,
+    /// certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "certKey")]
+    pub cert_key: Option<String>,
+    /// name of the config map or secret containing certificates
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    /// namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+    /// type for the certificate reference: "configmap" or "secret"
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+    pub r#type: Option<FlowCollectorKafkaTlsUserCertType>,
+}
+
+/// Kind of object the user certificate reference points to: a `configmap` or
+/// a `secret` (serialized lowercase via serde renames).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorKafkaTlsUserCertType {
+    #[serde(rename = "configmap")]
+    Configmap,
+    #[serde(rename = "secret")]
+    Secret,
+}
+
+/// loki, the flow store, client settings.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorLoki {
+    /// AuthToken describe the way to get a token to authenticate to Loki. DISABLED will not send any token with the request. HOST will use the local pod service account to authenticate to Loki. FORWARD will forward user token, in this mode, pod that are not receiving user request like the processor will use the local pod service account. Similar to HOST mode. When using the Loki Operator, set it to `HOST` or `FORWARD`.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "authToken")]
+    pub auth_token: Option<FlowCollectorLokiAuthToken>,
+    /// batchSize is max batch size (in bytes) of logs to accumulate before sending.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "batchSize")]
+    pub batch_size: Option<i64>,
+    /// batchWait is max time to wait before sending a batch.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "batchWait")]
+    pub batch_wait: Option<String>,
+    /// maxBackoff is the maximum backoff time for client connection between retries.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxBackoff")]
+    pub max_backoff: Option<String>,
+    /// maxRetries is the maximum number of retries for client connections.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxRetries")]
+    pub max_retries: Option<i32>,
+    /// minBackoff is the initial backoff time for client connection between retries.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "minBackoff")]
+    pub min_backoff: Option<String>,
+    /// querierURL specifies the address of the Loki querier service, in case it is different from the Loki ingester URL. If empty, the URL value will be used (assuming that the Loki ingester and querier are in the same server). When using the Loki Operator, do not set it, since ingestion and queries use the Loki gateway.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "querierUrl")]
+    pub querier_url: Option<String>,
+    /// staticLabels is a map of common labels to set on each flow.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "staticLabels")]
+    pub static_labels: Option<BTreeMap<String, String>>,
+    /// statusURL specifies the address of the Loki /ready /metrics /config endpoints, in case it is different from the Loki querier URL. If empty, the QuerierURL value will be used. This is useful to show error messages and some context in the frontend. When using the Loki Operator, set it to the Loki HTTP query frontend service, for example https://loki-query-frontend-http.netobserv.svc:3100/.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "statusUrl")]
+    pub status_url: Option<String>,
+    /// tenantID is the Loki X-Scope-OrgID that identifies the tenant for each request. When using the Loki Operator, set it to `network`, which corresponds to a special tenant mode.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "tenantID")]
+    pub tenant_id: Option<String>,
+    /// timeout is the maximum time connection / request limit. A Timeout of zero means no timeout.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub timeout: Option<String>,
+    /// tls client configuration.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub tls: Option<FlowCollectorLokiTls>,
+    /// url is the address of an existing Loki service to push the flows to. When using the Loki Operator, set it to the Loki gateway service with the `network` tenant set in path, for example https://loki-gateway-http.netobserv.svc:8080/api/logs/v1/network.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub url: Option<String>,
+}
+
+/// Loki `authToken` modes: `DISABLED` sends no token, `HOST` uses the local
+/// pod service account, `FORWARD` forwards the user token (see the field doc
+/// on FlowCollectorLoki.auth_token).
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorLokiAuthToken {
+    #[serde(rename = "DISABLED")]
+    Disabled,
+    #[serde(rename = "HOST")]
+    Host,
+    #[serde(rename = "FORWARD")]
+    Forward,
+}
+
+/// tls client configuration.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorLokiTls {
+    /// caCert defines the reference of the certificate for the Certificate Authority
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "caCert")]
+    pub ca_cert: Option<FlowCollectorLokiTlsCaCert>,
+    /// enable TLS
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub enable: Option<bool>,
+    /// insecureSkipVerify allows skipping client-side verification of the server certificate If set to true, CACert field will be ignored
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "insecureSkipVerify")]
+    pub insecure_skip_verify: Option<bool>,
+    /// userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS)
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "userCert")]
+    pub user_cert: Option<FlowCollectorLokiTlsUserCert>,
+}
+
+ /// caCert defines the reference of the certificate for the Certificate Authority
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorLokiTlsCaCert {
+     /// certFile defines the path to the certificate file name within the config map or secret
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "certFile")]
+     pub cert_file: Option<String>,
+     /// certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "certKey")]
+     pub cert_key: Option<String>,
+     /// name of the config map or secret containing certificates
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub name: Option<String>,
+     /// namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub namespace: Option<String>,
+     /// type for the certificate reference: "configmap" or "secret"
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+     pub r#type: Option<FlowCollectorLokiTlsCaCertType>,
+ }
+
+ /// caCert defines the reference of the certificate for the Certificate Authority
+ /// Allowed values for the certificate reference `type` field ("configmap" or "secret").
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub enum FlowCollectorLokiTlsCaCertType {
+     #[serde(rename = "configmap")]
+     Configmap,
+     #[serde(rename = "secret")]
+     Secret,
+ }
+
+ /// userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS)
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorLokiTlsUserCert {
+     /// certFile defines the path to the certificate file name within the config map or secret
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "certFile")]
+     pub cert_file: Option<String>,
+     /// certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "certKey")]
+     pub cert_key: Option<String>,
+     /// name of the config map or secret containing certificates
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub name: Option<String>,
+     /// namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub namespace: Option<String>,
+     /// type for the certificate reference: "configmap" or "secret"
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+     pub r#type: Option<FlowCollectorLokiTlsUserCertType>,
+ }
+
+ /// userCert defines the user certificate reference, used for mTLS (you can ignore it when using regular, one-way TLS)
+ /// Allowed values for the certificate reference `type` field ("configmap" or "secret").
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub enum FlowCollectorLokiTlsUserCertType {
+     #[serde(rename = "configmap")]
+     Configmap,
+     #[serde(rename = "secret")]
+     Secret,
+ }
+
+ /// processor defines the settings of the component that receives the flows from the agent, enriches them, and forwards them to the Loki persistence layer.
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessor {
+     /// Debug allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk.
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub debug: Option<FlowCollectorProcessorDebug>,
+     /// dropUnusedFields allows, when set to true, to drop fields that are known to be unused by OVS, in order to save storage space.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "dropUnusedFields")]
+     pub drop_unused_fields: Option<bool>,
+     /// enableKubeProbes is a flag to enable or disable Kubernetes liveness and readiness probes
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableKubeProbes")]
+     pub enable_kube_probes: Option<bool>,
+     /// healthPort is a collector HTTP port in the Pod that exposes the health check API
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "healthPort")]
+     pub health_port: Option<i32>,
+     /// imagePullPolicy is the Kubernetes pull policy for the image defined above
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "imagePullPolicy")]
+     pub image_pull_policy: Option<FlowCollectorProcessorImagePullPolicy>,
+     /// kafkaConsumerAutoscaler spec of a horizontal pod autoscaler to set up for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "kafkaConsumerAutoscaler")]
+     pub kafka_consumer_autoscaler: Option<FlowCollectorProcessorKafkaConsumerAutoscaler>,
+     /// kafkaConsumerBatchSize indicates to the broker the maximum batch size, in bytes, that the consumer will accept. Ignored when not using Kafka. Default: 10MB.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "kafkaConsumerBatchSize")]
+     pub kafka_consumer_batch_size: Option<i32>,
+     /// kafkaConsumerQueueCapacity defines the capacity of the internal message queue used in the Kafka consumer client. Ignored when not using Kafka.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "kafkaConsumerQueueCapacity")]
+     pub kafka_consumer_queue_capacity: Option<i32>,
+     /// kafkaConsumerReplicas defines the number of replicas (pods) to start for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "kafkaConsumerReplicas")]
+     pub kafka_consumer_replicas: Option<i32>,
+     /// logLevel of the collector runtime
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")]
+     pub log_level: Option<FlowCollectorProcessorLogLevel>,
+     /// Metrics define the processor configuration regarding metrics
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub metrics: Option<FlowCollectorProcessorMetrics>,
+     /// port of the flow collector (host port) By conventions, some value are not authorized port must not be below 1024 and must not equal this values: 4789,6081,500, and 4500
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub port: Option<i32>,
+     /// profilePort allows setting up a Go pprof profiler listening to this port
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "profilePort")]
+     pub profile_port: Option<i32>,
+     /// resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub resources: Option<FlowCollectorProcessorResources>,
+ }
+
+ /// Debug allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations (for example GOGC, GOMAXPROCS env vars). Users setting its values do it at their own risk.
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorDebug {
+     /// env allows passing custom environment variables to the NetObserv Agent. Useful for passing some very concrete performance-tuning options (such as GOGC, GOMAXPROCS) that shouldn't be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug and support scenarios.
+     // NOTE(review): map-of-strings per the CRD additionalProperties schema; assumes the
+     // file's top-level `use std::collections::BTreeMap;` import (kopium convention) — confirm.
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub env: Option<BTreeMap<String, String>>,
+ }
+
+ /// processor defines the settings of the component that receives the flows from the agent, enriches them, and forwards them to the Loki persistence layer.
+ /// Allowed values for the processor `imagePullPolicy` field; variants serialize under
+ /// their own names (standard Kubernetes pull policies).
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub enum FlowCollectorProcessorImagePullPolicy {
+     IfNotPresent,
+     Always,
+     Never,
+ }
+
+ /// kafkaConsumerAutoscaler spec of a horizontal pod autoscaler to set up for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled.
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscaler {
+     /// maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxReplicas")]
+     pub max_replicas: Option<i32>,
+     /// metrics used by the pod autoscaler
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub metrics: Option<Vec<FlowCollectorProcessorKafkaConsumerAutoscalerMetrics>>,
+     /// minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "minReplicas")]
+     pub min_replicas: Option<i32>,
+     /// Status describe the desired status regarding deploying an horizontal pod autoscaler DISABLED will not deploy an horizontal pod autoscaler ENABLED will deploy an horizontal pod autoscaler
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub status: Option<FlowCollectorProcessorKafkaConsumerAutoscalerStatus>,
+ }
+
+ /// MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetrics {
+     /// containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "containerResource")]
+     pub container_resource: Option<FlowCollectorProcessorKafkaConsumerAutoscalerMetricsContainerResource>,
+     /// external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub external: Option<FlowCollectorProcessorKafkaConsumerAutoscalerMetricsExternal>,
+     /// object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub object: Option<FlowCollectorProcessorKafkaConsumerAutoscalerMetricsObject>,
+     /// pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub pods: Option<FlowCollectorProcessorKafkaConsumerAutoscalerMetricsPods>,
+     /// resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub resource: Option<FlowCollectorProcessorKafkaConsumerAutoscalerMetricsResource>,
+     /// type is the type of metric source. It should be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled
+     #[serde(rename = "type")]
+     pub r#type: String,
+ }
+
+ /// containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsContainerResource {
+     /// container is the name of the container in the pods of the scaling target
+     pub container: String,
+     /// name is the name of the resource in question.
+     pub name: String,
+     /// target specifies the target value for the given metric
+     // All three fields are required in the CRD schema, hence no Option/serde defaults here.
+     pub target: FlowCollectorProcessorKafkaConsumerAutoscalerMetricsContainerResourceTarget,
+ }
+
+ /// target specifies the target value for the given metric
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsContainerResourceTarget {
+     /// averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageUtilization")]
+     pub average_utilization: Option<i32>,
+     /// averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+     // NOTE(review): quantities in autoscaling/v2 CRD schemas are x-kubernetes-int-or-string;
+     // assumes the file's top-level IntOrString import (k8s_openapi) — confirm.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageValue")]
+     pub average_value: Option<IntOrString>,
+     /// type represents whether the metric type is Utilization, Value, or AverageValue
+     #[serde(rename = "type")]
+     pub r#type: String,
+     /// value is the target value of the metric (as a quantity).
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub value: Option<IntOrString>,
+ }
+
+ /// external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsExternal {
+     /// metric identifies the target metric by name and selector
+     pub metric: FlowCollectorProcessorKafkaConsumerAutoscalerMetricsExternalMetric,
+     /// target specifies the target value for the given metric
+     // Both fields are required in the CRD schema, hence no Option/serde defaults here.
+     pub target: FlowCollectorProcessorKafkaConsumerAutoscalerMetricsExternalTarget,
+ }
+
+ /// metric identifies the target metric by name and selector
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsExternalMetric {
+     /// name is the name of the given metric
+     pub name: String,
+     /// selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub selector: Option<FlowCollectorProcessorKafkaConsumerAutoscalerMetricsExternalMetricSelector>,
+ }
+
+ /// selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsExternalMetricSelector {
+     /// matchExpressions is a list of label selector requirements. The requirements are ANDed.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")]
+     pub match_expressions: Option<Vec<FlowCollectorProcessorKafkaConsumerAutoscalerMetricsExternalMetricSelectorMatchExpressions>>,
+     /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")]
+     pub match_labels: Option<BTreeMap<String, String>>,
+ }
+
+ /// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsExternalMetricSelectorMatchExpressions {
+     /// key is the label key that the selector applies to.
+     pub key: String,
+     /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+     pub operator: String,
+     /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub values: Option<Vec<String>>,
+ }
+
+ /// target specifies the target value for the given metric
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsExternalTarget {
+     /// averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageUtilization")]
+     pub average_utilization: Option<i32>,
+     /// averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+     // NOTE(review): quantities in autoscaling/v2 CRD schemas are x-kubernetes-int-or-string;
+     // assumes the file's top-level IntOrString import (k8s_openapi) — confirm.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageValue")]
+     pub average_value: Option<IntOrString>,
+     /// type represents whether the metric type is Utilization, Value, or AverageValue
+     #[serde(rename = "type")]
+     pub r#type: String,
+     /// value is the target value of the metric (as a quantity).
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub value: Option<IntOrString>,
+ }
+
+ /// object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsObject {
+     /// describedObject specifies the descriptions of a object,such as kind,name apiVersion
+     #[serde(rename = "describedObject")]
+     pub described_object: FlowCollectorProcessorKafkaConsumerAutoscalerMetricsObjectDescribedObject,
+     /// metric identifies the target metric by name and selector
+     pub metric: FlowCollectorProcessorKafkaConsumerAutoscalerMetricsObjectMetric,
+     /// target specifies the target value for the given metric
+     // All three fields are required in the CRD schema, hence no Option/serde defaults here.
+     pub target: FlowCollectorProcessorKafkaConsumerAutoscalerMetricsObjectTarget,
+ }
+
+ /// describedObject specifies the descriptions of a object,such as kind,name apiVersion
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsObjectDescribedObject {
+     /// apiVersion is the API version of the referent
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")]
+     pub api_version: Option<String>,
+     /// kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+     pub kind: String,
+     /// name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+     pub name: String,
+ }
+
+ /// metric identifies the target metric by name and selector
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsObjectMetric {
+     /// name is the name of the given metric
+     pub name: String,
+     /// selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub selector: Option<FlowCollectorProcessorKafkaConsumerAutoscalerMetricsObjectMetricSelector>,
+ }
+
+ /// selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsObjectMetricSelector {
+     /// matchExpressions is a list of label selector requirements. The requirements are ANDed.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")]
+     pub match_expressions: Option<Vec<FlowCollectorProcessorKafkaConsumerAutoscalerMetricsObjectMetricSelectorMatchExpressions>>,
+     /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")]
+     pub match_labels: Option<BTreeMap<String, String>>,
+ }
+
+ /// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsObjectMetricSelectorMatchExpressions {
+     /// key is the label key that the selector applies to.
+     pub key: String,
+     /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+     pub operator: String,
+     /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub values: Option<Vec<String>>,
+ }
+
+ /// target specifies the target value for the given metric
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsObjectTarget {
+     /// averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageUtilization")]
+     pub average_utilization: Option<i32>,
+     /// averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+     // NOTE(review): quantities in autoscaling/v2 CRD schemas are x-kubernetes-int-or-string;
+     // assumes the file's top-level IntOrString import (k8s_openapi) — confirm.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageValue")]
+     pub average_value: Option<IntOrString>,
+     /// type represents whether the metric type is Utilization, Value, or AverageValue
+     #[serde(rename = "type")]
+     pub r#type: String,
+     /// value is the target value of the metric (as a quantity).
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub value: Option<IntOrString>,
+ }
+
+ /// pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsPods {
+     /// metric identifies the target metric by name and selector
+     pub metric: FlowCollectorProcessorKafkaConsumerAutoscalerMetricsPodsMetric,
+     /// target specifies the target value for the given metric
+     // Both fields are required in the CRD schema, hence no Option/serde defaults here.
+     pub target: FlowCollectorProcessorKafkaConsumerAutoscalerMetricsPodsTarget,
+ }
+
+ /// metric identifies the target metric by name and selector
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsPodsMetric {
+     /// name is the name of the given metric
+     pub name: String,
+     /// selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub selector: Option<FlowCollectorProcessorKafkaConsumerAutoscalerMetricsPodsMetricSelector>,
+ }
+
+ /// selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsPodsMetricSelector {
+     /// matchExpressions is a list of label selector requirements. The requirements are ANDed.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")]
+     pub match_expressions: Option<Vec<FlowCollectorProcessorKafkaConsumerAutoscalerMetricsPodsMetricSelectorMatchExpressions>>,
+     /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")]
+     pub match_labels: Option<BTreeMap<String, String>>,
+ }
+
+ /// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsPodsMetricSelectorMatchExpressions {
+     /// key is the label key that the selector applies to.
+     pub key: String,
+     /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+     pub operator: String,
+     /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub values: Option<Vec<String>>,
+ }
+
+ /// target specifies the target value for the given metric
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsPodsTarget {
+     /// averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageUtilization")]
+     pub average_utilization: Option<i32>,
+     /// averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+     // NOTE(review): quantities in autoscaling/v2 CRD schemas are x-kubernetes-int-or-string;
+     // assumes the file's top-level IntOrString import (k8s_openapi) — confirm.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageValue")]
+     pub average_value: Option<IntOrString>,
+     /// type represents whether the metric type is Utilization, Value, or AverageValue
+     #[serde(rename = "type")]
+     pub r#type: String,
+     /// value is the target value of the metric (as a quantity).
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub value: Option<IntOrString>,
+ }
+
+ /// resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsResource {
+     /// name is the name of the resource in question.
+     pub name: String,
+     /// target specifies the target value for the given metric
+     // Both fields are required in the CRD schema, hence no Option/serde defaults here.
+     pub target: FlowCollectorProcessorKafkaConsumerAutoscalerMetricsResourceTarget,
+ }
+
+ /// target specifies the target value for the given metric
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorKafkaConsumerAutoscalerMetricsResourceTarget {
+     /// averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageUtilization")]
+     pub average_utilization: Option<i32>,
+     /// averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
+     // NOTE(review): quantities in autoscaling/v2 CRD schemas are x-kubernetes-int-or-string;
+     // assumes the file's top-level IntOrString import (k8s_openapi) — confirm.
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "averageValue")]
+     pub average_value: Option<IntOrString>,
+     /// type represents whether the metric type is Utilization, Value, or AverageValue
+     #[serde(rename = "type")]
+     pub r#type: String,
+     /// value is the target value of the metric (as a quantity).
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub value: Option<IntOrString>,
+ }
+
+ /// kafkaConsumerAutoscaler spec of a horizontal pod autoscaler to set up for flowlogs-pipeline-transformer, which consumes Kafka messages. This setting is ignored when Kafka is disabled.
+ /// Allowed values for the autoscaler `status` field; serialized as "DISABLED" / "ENABLED".
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub enum FlowCollectorProcessorKafkaConsumerAutoscalerStatus {
+     #[serde(rename = "DISABLED")]
+     Disabled,
+     #[serde(rename = "ENABLED")]
+     Enabled,
+ }
+
+ /// processor defines the settings of the component that receives the flows from the agent, enriches them, and forwards them to the Loki persistence layer.
+ /// Allowed values for the processor `logLevel` field; serialized in lower case per the serde renames.
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub enum FlowCollectorProcessorLogLevel {
+     #[serde(rename = "trace")]
+     Trace,
+     #[serde(rename = "debug")]
+     Debug,
+     #[serde(rename = "info")]
+     Info,
+     #[serde(rename = "warn")]
+     Warn,
+     #[serde(rename = "error")]
+     Error,
+     #[serde(rename = "fatal")]
+     Fatal,
+     #[serde(rename = "panic")]
+     Panic,
+ }
+
+ /// Metrics define the processor configuration regarding metrics
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorMetrics {
+     /// ignoreTags is a list of tags to specify which metrics to ignore. Each metric is associated with a list of tags. More details in https://github.com/netobserv/network-observability-operator/tree/main/controllers/flowlogspipeline/metrics_definitions . Available tags are: egress, ingress, flows, bytes, packets, namespaces, nodes, workloads
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "ignoreTags")]
+     pub ignore_tags: Option<Vec<String>>,
+     /// metricsServer endpoint configuration for Prometheus scraper
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub server: Option<FlowCollectorProcessorMetricsServer>,
+ }
+
+ /// metricsServer endpoint configuration for Prometheus scraper
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorMetricsServer {
+     /// the prometheus HTTP port
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub port: Option<i32>,
+     /// TLS configuration.
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub tls: Option<FlowCollectorProcessorMetricsServerTls>,
+ }
+
+ /// TLS configuration.
+ #[derive(Serialize, Deserialize, Clone, Debug)]
+ pub struct FlowCollectorProcessorMetricsServerTls {
+     /// TLS configuration.
+     #[serde(default, skip_serializing_if = "Option::is_none")]
+     pub provided: Option<FlowCollectorProcessorMetricsServerTlsProvided>,
+     /// Select the type of TLS configuration "DISABLED" (default) to not configure TLS for the endpoint, "PROVIDED" to manually provide cert file and a key file, and "AUTO" to use OpenShift auto generated certificate using annotations
+     #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+     pub r#type: Option<FlowCollectorProcessorMetricsServerTlsType>,
+ }
+
+/// TLS configuration.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorProcessorMetricsServerTlsProvided {
+    /// certFile defines the path to the certificate file name within the config map or secret
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "certFile")]
+    pub cert_file: Option<String>,
+    /// certKey defines the path to the certificate private key file name within the config map or secret. Omit when the key is not necessary.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "certKey")]
+    pub cert_key: Option<String>,
+    /// name of the config map or secret containing certificates
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    /// namespace of the config map or secret containing certificates. If omitted, assumes same namespace as where NetObserv is deployed. If the namespace is different, the config map or the secret will be copied so that it can be mounted as required.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+    /// type for the certificate reference: "configmap" or "secret"
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+    pub r#type: Option<FlowCollectorProcessorMetricsServerTlsProvidedType>,
+}
+
+/// type for the certificate reference: "configmap" or "secret"
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorProcessorMetricsServerTlsProvidedType {
+ #[serde(rename = "configmap")]
+ Configmap,
+ #[serde(rename = "secret")]
+ Secret,
+}
+
+/// Type of TLS configuration for the metrics server endpoint: "DISABLED" (default) to not configure TLS, "PROVIDED" to manually provide a cert file and a key file, "AUTO" to use an OpenShift auto-generated certificate.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorProcessorMetricsServerTlsType {
+ #[serde(rename = "DISABLED")]
+ Disabled,
+ #[serde(rename = "PROVIDED")]
+ Provided,
+ #[serde(rename = "AUTO")]
+ Auto,
+}
+
+/// resources are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorProcessorResources {
+    /// Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.
+    ///  This is an alpha field and requires enabling the DynamicResourceAllocation feature gate.
+    ///  This field is immutable. It can only be set for containers.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub claims: Option<Vec<FlowCollectorProcessorResourcesClaims>>,
+    /// Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub limits: Option<BTreeMap<String, IntOrString>>,
+    /// Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub requests: Option<BTreeMap<String, IntOrString>>,
+}
+
+/// ResourceClaim references one entry in PodSpec.ResourceClaims.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorProcessorResourcesClaims {
+ /// Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
+ pub name: String, // required field: no serde default/skip attributes, so it must be present on deserialization
+}
+
+/// FlowCollectorStatus defines the observed state of FlowCollector
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorStatus {
+    /// conditions represent the latest available observations of an object's state
+    pub conditions: Vec<FlowCollectorStatusConditions>,
+    /// namespace where console plugin and flowlogs-pipeline have been deployed.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+}
+
+/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example,
+///  type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+///  // other fields }
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorStatusConditions {
+    /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+    #[serde(rename = "lastTransitionTime")]
+    pub last_transition_time: String,
+    /// message is a human readable message indicating details about the transition. This may be an empty string.
+    pub message: String,
+    /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")]
+    pub observed_generation: Option<i64>,
+    /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
+    pub reason: String,
+    /// status of the condition, one of True, False, Unknown.
+    pub status: FlowCollectorStatusConditionsStatus,
+    /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+    #[serde(rename = "type")]
+    pub r#type: String,
+}
+
+/// status of the condition as carried by `metav1.Condition.status`:
+/// one of True, False, Unknown. Serialized with the default (capitalized)
+/// variant names, so no #[serde(rename)] attributes are needed here.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub enum FlowCollectorStatusConditionsStatus {
+ True,
+ False,
+ Unknown,
+}
+
diff --git a/kube-custom-resources-rs/src/flows_netobserv_io_v1alpha1/mod.rs b/kube-custom-resources-rs/src/flows_netobserv_io_v1alpha1/mod.rs
new file mode 100644
index 000000000..ccf7423dd
--- /dev/null
+++ b/kube-custom-resources-rs/src/flows_netobserv_io_v1alpha1/mod.rs
@@ -0,0 +1 @@
+pub mod flowcollectors;
diff --git a/kube-custom-resources-rs/src/flows_netobserv_io_v1beta1/flowcollectors.rs b/kube-custom-resources-rs/src/flows_netobserv_io_v1beta1/flowcollectors.rs
new file mode 100644
index 000000000..8576647e1
--- /dev/null
+++ b/kube-custom-resources-rs/src/flows_netobserv_io_v1beta1/flowcollectors.rs
@@ -0,0 +1,1789 @@
+// WARNING: generated by kopium - manual changes will be overwritten
+// kopium command: kopium --docs --filename ./crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta1/flowcollectors.yaml
+// kopium version: 0.16.1
+
+use kube::CustomResource;
+use serde::{Serialize, Deserialize};
+use std::collections::BTreeMap;
+use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString;
+
+/// Defines the desired state of the FlowCollector resource.
*: the mention of "unsupported", or "deprecated" for a feature throughout this document means that this feature is not officially supported by Red Hat. It might have been, for instance, contributed by the community and accepted without a formal agreement for maintenance. The product maintainers might provide some support for these features as a best effort only.
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug)]
+#[kube(group = "flows.netobserv.io", version = "v1beta1", kind = "FlowCollector", plural = "flowcollectors")]
+#[kube(status = "FlowCollectorStatus")]
+#[kube(schema = "disabled")]
+pub struct FlowCollectorSpec {
+ /// Agent configuration for flows extraction.
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub agent: Option,
+ /// `consolePlugin` defines the settings related to the OpenShift Console plugin, when available.
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "consolePlugin")]
+ pub console_plugin: Option,
+ /// `deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
- `DIRECT` (default) to make the flow processor listening directly from the agents.
- `KAFKA` to make flows sent to a Kafka pipeline before consumption by the processor.
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "deploymentModel")]
+ pub deployment_model: Option,
+ /// `exporters` define additional optional exporters for custom consumption or storage.
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub exporters: Option>,
+ /// Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `KAFKA`.
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub kafka: Option,
+ /// `loki`, the flow store, client settings.
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub loki: Option,
+ /// Namespace where NetObserv pods are deployed.
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub namespace: Option,
+ /// `processor` defines the settings of the component that receives the flows from the agent, enriches them, generates metrics, and forwards them to the Loki persistence layer and/or any available exporter.
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub processor: Option,
+}
+
+/// Agent configuration for flows extraction.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorAgent {
+ /// `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`.
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub ebpf: Option,
+ /// `ipfix` [deprecated (*)] - describes the settings related to the IPFIX-based flow reporter when `spec.agent.type` is set to `IPFIX`.
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub ipfix: Option,
+ /// `type` selects the flows tracing agent. Possible values are:
- `EBPF` (default) to use NetObserv eBPF agent.
- `IPFIX` [deprecated (*)] - to use the legacy IPFIX collector.
`EBPF` is recommended as it offers better performances and should work regardless of the CNI installed on the cluster. `IPFIX` works with OVN-Kubernetes CNI (other CNIs could work if they support exporting IPFIX, but they would require manual configuration).
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
+ pub r#type: Option,
+}
+
+/// `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `EBPF`.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct FlowCollectorAgentEbpf {
+ /// `cacheActiveTimeout` is the max period during which the reporter aggregates flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "cacheActiveTimeout")]
+ pub cache_active_timeout: Option,
+ /// `cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection.
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "cacheMaxFlows")]
+ pub cache_max_flows: Option,
+ /// `debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk.
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub debug: Option,
+ /// `excludeInterfaces` contains the interface names that are excluded from flow tracing. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string.
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "excludeInterfaces")]
+ pub exclude_interfaces: Option>,
+ /// List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting the kernel debug filesystem, so the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
- `DNSTracking`: enable the DNS tracking feature. This feature requires mounting the kernel debug filesystem hence the eBPF pod has to run as privileged. If the `spec.agent.eBPF.privileged` parameter is not set, an error is reported.
- `FlowRTT` [unsupported (*)]: enable flow latency (RTT) calculations in the eBPF agent during TCP handshakes. This feature better works with `sampling` set to 1.
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub features: Option>,
+ /// `imagePullPolicy` is the Kubernetes pull policy for the image defined above
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "imagePullPolicy")]
+ pub image_pull_policy: Option