diff --git a/code-generator/src/catalog.rs b/code-generator/src/catalog.rs index 6fc9bf588..0078de3d4 100644 --- a/code-generator/src/catalog.rs +++ b/code-generator/src/catalog.rs @@ -2888,6 +2888,7 @@ pub const CRD_V1_SOURCES: &'static [UpstreamSource] = &[ "https://github.com/strimzi/strimzi-kafka-operator/blob/main/helm-charts/helm3/strimzi-kafka-operator/crds/047-Crd-kafkaconnector.yaml", "https://github.com/strimzi/strimzi-kafka-operator/blob/main/helm-charts/helm3/strimzi-kafka-operator/crds/048-Crd-kafkamirrormaker2.yaml", "https://github.com/strimzi/strimzi-kafka-operator/blob/main/helm-charts/helm3/strimzi-kafka-operator/crds/049-Crd-kafkarebalance.yaml", + "https://github.com/strimzi/strimzi-kafka-operator/blob/main/helm-charts/helm3/strimzi-kafka-operator/crds/04A-Crd-kafkanodepool.yaml", ], }, UpstreamSource { diff --git a/crd-catalog/strimzi/strimzi-kafka-operator/kafka.strimzi.io/v1beta2/kafkanodepools.yaml b/crd-catalog/strimzi/strimzi-kafka-operator/kafka.strimzi.io/v1beta2/kafkanodepools.yaml new file mode 100644 index 000000000..63a3d57c2 --- /dev/null +++ b/crd-catalog/strimzi/strimzi-kafka-operator/kafka.strimzi.io/v1beta2/kafkanodepools.yaml @@ -0,0 +1,1170 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + labels: + app: "strimzi" + strimzi.io/crd-install: "true" + name: "kafkanodepools.kafka.strimzi.io" +spec: + conversion: + strategy: "None" + group: "kafka.strimzi.io" + names: + categories: + - "strimzi" + kind: "KafkaNodePool" + listKind: "KafkaNodePoolList" + plural: "kafkanodepools" + shortNames: + - "knp" + singular: "kafkanodepool" + scope: "Namespaced" + versions: + - additionalPrinterColumns: + - description: "The desired number of replicas" + jsonPath: ".spec.replicas" + name: "Desired replicas" + type: "integer" + - description: "Roles of the nodes in the pool" + jsonPath: ".status.roles" + name: "Roles" + type: "string" + - description: "Node IDs used by Kafka nodes in this pool" + jsonPath: ".status.nodeIds" + name: "NodeIds" + type: "string" + name: "v1beta2" + schema: + openAPIV3Schema: + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "The specification of the KafkaNodePool." + properties: + jvmOptions: + description: "JVM Options for pods." + properties: + -XX: + additionalProperties: + type: "string" + description: "A map of -XX options to the JVM." + type: "object" + -Xms: + description: "-Xms option to to the JVM." + pattern: "^[0-9]+[mMgG]?$" + type: "string" + -Xmx: + description: "-Xmx option to to the JVM." + pattern: "^[0-9]+[mMgG]?$" + type: "string" + gcLoggingEnabled: + description: "Specifies whether the Garbage Collection logging is enabled. The default is false." + type: "boolean" + javaSystemProperties: + description: "A map of additional system properties which will be passed using the `-D` option to the JVM." 
+ items: + properties: + name: + description: "The system property name." + type: "string" + value: + description: "The system property value." + type: "string" + type: "object" + type: "array" + type: "object" + replicas: + description: "The number of pods in the pool." + minimum: 0.0 + type: "integer" + resources: + description: "CPU and memory resources to reserve." + properties: + claims: + items: + properties: + name: + type: "string" + type: "object" + type: "array" + limits: + additionalProperties: + anyOf: + - type: "integer" + - type: "string" + pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" + x-kubernetes-int-or-string: true + type: "object" + requests: + additionalProperties: + anyOf: + - type: "integer" + - type: "string" + pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" + x-kubernetes-int-or-string: true + type: "object" + type: "object" + roles: + description: "The roles that the nodes in this pool will have when KRaft mode is enabled. Supported values are 'broker' and 'controller'. This field is required. When KRaft mode is disabled, the only allowed value if `broker`." + items: + enum: + - "controller" + - "broker" + type: "string" + type: "array" + storage: + description: "Storage configuration (disk). Cannot be updated." + properties: + class: + description: "The storage class to use for dynamic volume allocation." + type: "string" + deleteClaim: + description: "Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed." + type: "boolean" + id: + description: "Storage identification number. It is mandatory only for storage volumes defined in a storage of type 'jbod'." + minimum: 0.0 + type: "integer" + kraftMetadata: + description: "Specifies whether this volume should be used for storing KRaft metadata. This property is optional. When set, the only currently supported value is `shared`. At most one volume can have this property set." + enum: + - "shared" + type: "string" + overrides: + description: "Overrides for individual brokers. The `overrides` field allows you to specify a different configuration for different brokers." + items: + properties: + broker: + description: "Id of the kafka broker (broker identifier)." + type: "integer" + class: + description: "The storage class to use for dynamic volume allocation for this broker." + type: "string" + type: "object" + type: "array" + selector: + additionalProperties: + type: "string" + description: "Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume." + type: "object" + size: + description: "When `type=persistent-claim`, defines the size of the persistent volume claim, such as 100Gi. Mandatory when `type=persistent-claim`." + type: "string" + sizeLimit: + description: "When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi)." + pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$" + type: "string" + type: + description: "Storage type, must be either 'ephemeral', 'persistent-claim', or 'jbod'." + enum: + - "ephemeral" + - "persistent-claim" + - "jbod" + type: "string" + volumes: + description: "List of volumes as Storage objects representing the JBOD disks array." + items: + properties: + class: + description: "The storage class to use for dynamic volume allocation." 
+ type: "string" + deleteClaim: + description: "Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed." + type: "boolean" + id: + description: "Storage identification number. Mandatory for storage volumes defined with a `jbod` storage type configuration." + minimum: 0.0 + type: "integer" + kraftMetadata: + description: "Specifies whether this volume should be used for storing KRaft metadata. This property is optional. When set, the only currently supported value is `shared`. At most one volume can have this property set." + enum: + - "shared" + type: "string" + overrides: + description: "Overrides for individual brokers. The `overrides` field allows you to specify a different configuration for different brokers." + items: + properties: + broker: + description: "Id of the kafka broker (broker identifier)." + type: "integer" + class: + description: "The storage class to use for dynamic volume allocation for this broker." + type: "string" + type: "object" + type: "array" + selector: + additionalProperties: + type: "string" + description: "Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume." + type: "object" + size: + description: "When `type=persistent-claim`, defines the size of the persistent volume claim, such as 100Gi. Mandatory when `type=persistent-claim`." + type: "string" + sizeLimit: + description: "When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi)." + pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$" + type: "string" + type: + description: "Storage type, must be either 'ephemeral' or 'persistent-claim'." + enum: + - "ephemeral" + - "persistent-claim" + type: "string" + required: + - "type" + type: "object" + type: "array" + required: + - "type" + type: "object" + template: + description: "Template for pool resources. The template allows users to specify how the resources belonging to this pool are generated." + properties: + initContainer: + description: "Template for the Kafka init container." + properties: + env: + description: "Environment variables which should be applied to the container." + items: + properties: + name: + description: "The environment variable key." + type: "string" + value: + description: "The environment variable value." + type: "string" + type: "object" + type: "array" + securityContext: + description: "Security context for the container." 
+ properties: + allowPrivilegeEscalation: + type: "boolean" + appArmorProfile: + properties: + localhostProfile: + type: "string" + type: + type: "string" + type: "object" + capabilities: + properties: + add: + items: + type: "string" + type: "array" + drop: + items: + type: "string" + type: "array" + type: "object" + privileged: + type: "boolean" + procMount: + type: "string" + readOnlyRootFilesystem: + type: "boolean" + runAsGroup: + type: "integer" + runAsNonRoot: + type: "boolean" + runAsUser: + type: "integer" + seLinuxOptions: + properties: + level: + type: "string" + role: + type: "string" + type: + type: "string" + user: + type: "string" + type: "object" + seccompProfile: + properties: + localhostProfile: + type: "string" + type: + type: "string" + type: "object" + windowsOptions: + properties: + gmsaCredentialSpec: + type: "string" + gmsaCredentialSpecName: + type: "string" + hostProcess: + type: "boolean" + runAsUserName: + type: "string" + type: "object" + type: "object" + volumeMounts: + description: "Additional volume mounts which should be applied to the container." + items: + properties: + mountPath: + type: "string" + mountPropagation: + type: "string" + name: + type: "string" + readOnly: + type: "boolean" + recursiveReadOnly: + type: "string" + subPath: + type: "string" + subPathExpr: + type: "string" + type: "object" + type: "array" + type: "object" + kafkaContainer: + description: "Template for the Kafka broker container." + properties: + env: + description: "Environment variables which should be applied to the container." + items: + properties: + name: + description: "The environment variable key." + type: "string" + value: + description: "The environment variable value." + type: "string" + type: "object" + type: "array" + securityContext: + description: "Security context for the container." + properties: + allowPrivilegeEscalation: + type: "boolean" + appArmorProfile: + properties: + localhostProfile: + type: "string" + type: + type: "string" + type: "object" + capabilities: + properties: + add: + items: + type: "string" + type: "array" + drop: + items: + type: "string" + type: "array" + type: "object" + privileged: + type: "boolean" + procMount: + type: "string" + readOnlyRootFilesystem: + type: "boolean" + runAsGroup: + type: "integer" + runAsNonRoot: + type: "boolean" + runAsUser: + type: "integer" + seLinuxOptions: + properties: + level: + type: "string" + role: + type: "string" + type: + type: "string" + user: + type: "string" + type: "object" + seccompProfile: + properties: + localhostProfile: + type: "string" + type: + type: "string" + type: "object" + windowsOptions: + properties: + gmsaCredentialSpec: + type: "string" + gmsaCredentialSpecName: + type: "string" + hostProcess: + type: "boolean" + runAsUserName: + type: "string" + type: "object" + type: "object" + volumeMounts: + description: "Additional volume mounts which should be applied to the container." + items: + properties: + mountPath: + type: "string" + mountPropagation: + type: "string" + name: + type: "string" + readOnly: + type: "boolean" + recursiveReadOnly: + type: "string" + subPath: + type: "string" + subPathExpr: + type: "string" + type: "object" + type: "array" + type: "object" + perPodIngress: + description: "Template for Kafka per-pod `Ingress` used for access from outside of Kubernetes." + properties: + metadata: + description: "Metadata applied to the resource." 
+ properties: + annotations: + additionalProperties: + type: "string" + description: "Annotations added to the Kubernetes resource." + type: "object" + labels: + additionalProperties: + type: "string" + description: "Labels added to the Kubernetes resource." + type: "object" + type: "object" + type: "object" + perPodRoute: + description: "Template for Kafka per-pod `Routes` used for access from outside of OpenShift." + properties: + metadata: + description: "Metadata applied to the resource." + properties: + annotations: + additionalProperties: + type: "string" + description: "Annotations added to the Kubernetes resource." + type: "object" + labels: + additionalProperties: + type: "string" + description: "Labels added to the Kubernetes resource." + type: "object" + type: "object" + type: "object" + perPodService: + description: "Template for Kafka per-pod `Services` used for access from outside of Kubernetes." + properties: + metadata: + description: "Metadata applied to the resource." + properties: + annotations: + additionalProperties: + type: "string" + description: "Annotations added to the Kubernetes resource." + type: "object" + labels: + additionalProperties: + type: "string" + description: "Labels added to the Kubernetes resource." + type: "object" + type: "object" + type: "object" + persistentVolumeClaim: + description: "Template for all Kafka `PersistentVolumeClaims`." + properties: + metadata: + description: "Metadata applied to the resource." + properties: + annotations: + additionalProperties: + type: "string" + description: "Annotations added to the Kubernetes resource." + type: "object" + labels: + additionalProperties: + type: "string" + description: "Labels added to the Kubernetes resource." + type: "object" + type: "object" + type: "object" + pod: + description: "Template for Kafka `Pods`." + properties: + affinity: + description: "The pod's affinity rules." 
+ properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: "string" + operator: + type: "string" + values: + items: + type: "string" + type: "array" + type: "object" + type: "array" + matchFields: + items: + properties: + key: + type: "string" + operator: + type: "string" + values: + items: + type: "string" + type: "array" + type: "object" + type: "array" + type: "object" + weight: + type: "integer" + type: "object" + type: "array" + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: "string" + operator: + type: "string" + values: + items: + type: "string" + type: "array" + type: "object" + type: "array" + matchFields: + items: + properties: + key: + type: "string" + operator: + type: "string" + values: + items: + type: "string" + type: "array" + type: "object" + type: "array" + type: "object" + type: "array" + type: "object" + type: "object" + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: "string" + operator: + type: "string" + values: + items: + type: "string" + type: "array" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + type: "object" + type: "object" + matchLabelKeys: + items: + type: "string" + type: "array" + mismatchLabelKeys: + items: + type: "string" + type: "array" + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: "string" + operator: + type: "string" + values: + items: + type: "string" + type: "array" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + type: "object" + type: "object" + namespaces: + items: + type: "string" + type: "array" + topologyKey: + type: "string" + type: "object" + weight: + type: "integer" + type: "object" + type: "array" + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: "string" + operator: + type: "string" + values: + items: + type: "string" + type: "array" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + type: "object" + type: "object" + matchLabelKeys: + items: + type: "string" + type: "array" + mismatchLabelKeys: + items: + type: "string" + type: "array" + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: "string" + operator: + type: "string" + values: + items: + type: "string" + type: "array" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + type: "object" + type: "object" + namespaces: + items: + type: "string" + type: "array" + topologyKey: + type: "string" + type: "object" + type: "array" + type: "object" + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: "string" + operator: + type: "string" + values: + items: + type: "string" + type: "array" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + type: "object" + type: "object" + matchLabelKeys: + 
items: + type: "string" + type: "array" + mismatchLabelKeys: + items: + type: "string" + type: "array" + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: "string" + operator: + type: "string" + values: + items: + type: "string" + type: "array" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + type: "object" + type: "object" + namespaces: + items: + type: "string" + type: "array" + topologyKey: + type: "string" + type: "object" + weight: + type: "integer" + type: "object" + type: "array" + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: "string" + operator: + type: "string" + values: + items: + type: "string" + type: "array" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + type: "object" + type: "object" + matchLabelKeys: + items: + type: "string" + type: "array" + mismatchLabelKeys: + items: + type: "string" + type: "array" + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: "string" + operator: + type: "string" + values: + items: + type: "string" + type: "array" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + type: "object" + type: "object" + namespaces: + items: + type: "string" + type: "array" + topologyKey: + type: "string" + type: "object" + type: "array" + type: "object" + type: "object" + enableServiceLinks: + description: "Indicates whether information about services should be injected into Pod's environment variables." + type: "boolean" + hostAliases: + description: "The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified." + items: + properties: + hostnames: + items: + type: "string" + type: "array" + ip: + type: "string" + type: "object" + type: "array" + imagePullSecrets: + description: "List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored." + items: + properties: + name: + type: "string" + type: "object" + type: "array" + metadata: + description: "Metadata applied to the resource." + properties: + annotations: + additionalProperties: + type: "string" + description: "Annotations added to the Kubernetes resource." + type: "object" + labels: + additionalProperties: + type: "string" + description: "Labels added to the Kubernetes resource." + type: "object" + type: "object" + priorityClassName: + description: "The name of the priority class used to assign priority to the pods. " + type: "string" + schedulerName: + description: "The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used." + type: "string" + securityContext: + description: "Configures pod-level security attributes and common container settings." 
+ properties: + appArmorProfile: + properties: + localhostProfile: + type: "string" + type: + type: "string" + type: "object" + fsGroup: + type: "integer" + fsGroupChangePolicy: + type: "string" + runAsGroup: + type: "integer" + runAsNonRoot: + type: "boolean" + runAsUser: + type: "integer" + seLinuxOptions: + properties: + level: + type: "string" + role: + type: "string" + type: + type: "string" + user: + type: "string" + type: "object" + seccompProfile: + properties: + localhostProfile: + type: "string" + type: + type: "string" + type: "object" + supplementalGroups: + items: + type: "integer" + type: "array" + sysctls: + items: + properties: + name: + type: "string" + value: + type: "string" + type: "object" + type: "array" + windowsOptions: + properties: + gmsaCredentialSpec: + type: "string" + gmsaCredentialSpecName: + type: "string" + hostProcess: + type: "boolean" + runAsUserName: + type: "string" + type: "object" + type: "object" + terminationGracePeriodSeconds: + description: "The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds." + minimum: 0.0 + type: "integer" + tmpDirSizeLimit: + description: "Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources." + pattern: "^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$" + type: "string" + tolerations: + description: "The pod's tolerations." + items: + properties: + effect: + type: "string" + key: + type: "string" + operator: + type: "string" + tolerationSeconds: + type: "integer" + value: + type: "string" + type: "object" + type: "array" + topologySpreadConstraints: + description: "The pod's topology spread constraints." + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: "string" + operator: + type: "string" + values: + items: + type: "string" + type: "array" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + type: "object" + type: "object" + matchLabelKeys: + items: + type: "string" + type: "array" + maxSkew: + type: "integer" + minDomains: + type: "integer" + nodeAffinityPolicy: + type: "string" + nodeTaintsPolicy: + type: "string" + topologyKey: + type: "string" + whenUnsatisfiable: + type: "string" + type: "object" + type: "array" + volumes: + description: "Additional volumes that can be mounted to the pod." + items: + oneOf: + - properties: + configMap: {} + emptyDir: {} + persistentVolumeClaim: {} + secret: {} + required: [] + properties: + configMap: + description: "ConfigMap to use to populate the volume." 
+ properties: + defaultMode: + type: "integer" + items: + items: + properties: + key: + type: "string" + mode: + type: "integer" + path: + type: "string" + type: "object" + type: "array" + name: + type: "string" + optional: + type: "boolean" + type: "object" + emptyDir: + description: "EmptyDir to use to populate the volume." + properties: + medium: + type: "string" + sizeLimit: + properties: + amount: + type: "string" + format: + type: "string" + type: "object" + type: "object" + name: + description: "Name to use for the volume. Required." + type: "string" + persistentVolumeClaim: + description: "PersistentVolumeClaim object to use to populate the volume." + properties: + claimName: + type: "string" + readOnly: + type: "boolean" + type: "object" + secret: + description: "Secret to use populate the volume." + properties: + defaultMode: + type: "integer" + items: + items: + properties: + key: + type: "string" + mode: + type: "integer" + path: + type: "string" + type: "object" + type: "array" + optional: + type: "boolean" + secretName: + type: "string" + type: "object" + type: "object" + type: "array" + type: "object" + podSet: + description: "Template for Kafka `StrimziPodSet` resource." + properties: + metadata: + description: "Metadata applied to the resource." + properties: + annotations: + additionalProperties: + type: "string" + description: "Annotations added to the Kubernetes resource." + type: "object" + labels: + additionalProperties: + type: "string" + description: "Labels added to the Kubernetes resource." + type: "object" + type: "object" + type: "object" + type: "object" + required: + - "replicas" + - "storage" + - "roles" + type: "object" + status: + description: "The status of the KafkaNodePool." + properties: + clusterId: + description: "Kafka cluster ID." + type: "string" + conditions: + description: "List of status conditions." + items: + properties: + lastTransitionTime: + description: "Last time the condition of a type changed from one status to another. The required format is 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone." + type: "string" + message: + description: "Human-readable message indicating details about the condition's last transition." + type: "string" + reason: + description: "The reason for the condition's last transition (a single word in CamelCase)." + type: "string" + status: + description: "The status of the condition, either True, False or Unknown." + type: "string" + type: + description: "The unique identifier of a condition, used to distinguish between other conditions in the resource." + type: "string" + type: "object" + type: "array" + labelSelector: + description: "Label selector for pods providing this resource." + type: "string" + nodeIds: + description: "Node IDs used by Kafka nodes in this pool." + items: + type: "integer" + type: "array" + observedGeneration: + description: "The generation of the CRD that was last reconciled by the operator." + type: "integer" + replicas: + description: "The current number of pods being used to provide this resource." + type: "integer" + roles: + description: "The roles currently assigned to this pool." 
+ items: + enum: + - "controller" + - "broker" + type: "string" + type: "array" + type: "object" + type: "object" + served: true + storage: true + subresources: + scale: + labelSelectorPath: ".status.labelSelector" + specReplicasPath: ".spec.replicas" + statusReplicasPath: ".status.replicas" + status: {} diff --git a/kube-custom-resources-rs/src/kafka_strimzi_io/v1beta2/kafkanodepools.rs b/kube-custom-resources-rs/src/kafka_strimzi_io/v1beta2/kafkanodepools.rs new file mode 100644 index 000000000..a38398470 --- /dev/null +++ b/kube-custom-resources-rs/src/kafka_strimzi_io/v1beta2/kafkanodepools.rs @@ -0,0 +1,1208 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/strimzi/strimzi-kafka-operator/kafka.strimzi.io/v1beta2/kafkanodepools.yaml --derive=Default --derive=PartialEq --smart-derive-elision +// kopium version: 0.20.1 + +#[allow(unused_imports)] +mod prelude { + pub use kube::CustomResource; + pub use serde::{Serialize, Deserialize}; + pub use std::collections::BTreeMap; + pub use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; + pub use k8s_openapi::apimachinery::pkg::apis::meta::v1::Condition; +} +use self::prelude::*; + +/// The specification of the KafkaNodePool. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "kafka.strimzi.io", version = "v1beta2", kind = "KafkaNodePool", plural = "kafkanodepools")] +#[kube(namespaced)] +#[kube(status = "KafkaNodePoolStatus")] +#[kube(schema = "disabled")] +#[kube(derive="PartialEq")] +pub struct KafkaNodePoolSpec { + /// JVM Options for pods. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "jvmOptions")] + pub jvm_options: Option, + /// The number of pods in the pool. + pub replicas: i64, + /// CPU and memory resources to reserve. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resources: Option, + /// The roles that the nodes in this pool will have when KRaft mode is enabled. Supported values are 'broker' and 'controller'. This field is required. When KRaft mode is disabled, the only allowed value if `broker`. + pub roles: Vec, + /// Storage configuration (disk). Cannot be updated. + pub storage: KafkaNodePoolStorage, + /// Template for pool resources. The template allows users to specify how the resources belonging to this pool are generated. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub template: Option, +} + +/// JVM Options for pods. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolJvmOptions { + /// A map of -XX options to the JVM. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "-XX")] + pub xx: Option>, + /// -Xms option to to the JVM. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "-Xms")] + pub xms: Option, + /// -Xmx option to to the JVM. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "-Xmx")] + pub xmx: Option, + /// Specifies whether the Garbage Collection logging is enabled. The default is false. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "gcLoggingEnabled")] + pub gc_logging_enabled: Option, + /// A map of additional system properties which will be passed using the `-D` option to the JVM. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "javaSystemProperties")] + pub java_system_properties: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolJvmOptionsJavaSystemProperties { + /// The system property name. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// The system property value. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +/// CPU and memory resources to reserve. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolResources { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub claims: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub limits: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub requests: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolResourcesClaims { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +/// Storage configuration (disk). Cannot be updated. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct KafkaNodePoolStorage { + /// The storage class to use for dynamic volume allocation. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub class: Option, + /// Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "deleteClaim")] + pub delete_claim: Option, + /// Storage identification number. It is mandatory only for storage volumes defined in a storage of type 'jbod'. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub id: Option, + /// Specifies whether this volume should be used for storing KRaft metadata. This property is optional. When set, the only currently supported value is `shared`. At most one volume can have this property set. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "kraftMetadata")] + pub kraft_metadata: Option, + /// Overrides for individual brokers. The `overrides` field allows you to specify a different configuration for different brokers. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub overrides: Option>, + /// Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub selector: Option>, + /// When `type=persistent-claim`, defines the size of the persistent volume claim, such as 100Gi. Mandatory when `type=persistent-claim`. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub size: Option, + /// When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "sizeLimit")] + pub size_limit: Option, + /// Storage type, must be either 'ephemeral', 'persistent-claim', or 'jbod'. + #[serde(rename = "type")] + pub r#type: KafkaNodePoolStorageType, + /// List of volumes as Storage objects representing the JBOD disks array. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub volumes: Option>, +} + +/// Storage configuration (disk). Cannot be updated. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum KafkaNodePoolStorageKraftMetadata { + #[serde(rename = "shared")] + Shared, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolStorageOverrides { + /// Id of the kafka broker (broker identifier). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub broker: Option, + /// The storage class to use for dynamic volume allocation for this broker. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub class: Option, +} + +/// Storage configuration (disk). Cannot be updated. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum KafkaNodePoolStorageType { + #[serde(rename = "ephemeral")] + Ephemeral, + #[serde(rename = "persistent-claim")] + PersistentClaim, + #[serde(rename = "jbod")] + Jbod, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct KafkaNodePoolStorageVolumes { + /// The storage class to use for dynamic volume allocation. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub class: Option, + /// Specifies if the persistent volume claim has to be deleted when the cluster is un-deployed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "deleteClaim")] + pub delete_claim: Option, + /// Storage identification number. Mandatory for storage volumes defined with a `jbod` storage type configuration. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub id: Option, + /// Specifies whether this volume should be used for storing KRaft metadata. This property is optional. When set, the only currently supported value is `shared`. At most one volume can have this property set. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "kraftMetadata")] + pub kraft_metadata: Option, + /// Overrides for individual brokers. The `overrides` field allows you to specify a different configuration for different brokers. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub overrides: Option>, + /// Specifies a specific persistent volume to use. It contains key:value pairs representing labels for selecting such a volume. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub selector: Option>, + /// When `type=persistent-claim`, defines the size of the persistent volume claim, such as 100Gi. Mandatory when `type=persistent-claim`. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub size: Option, + /// When type=ephemeral, defines the total amount of local storage required for this EmptyDir volume (for example 1Gi). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "sizeLimit")] + pub size_limit: Option, + /// Storage type, must be either 'ephemeral' or 'persistent-claim'. + #[serde(rename = "type")] + pub r#type: KafkaNodePoolStorageVolumesType, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum KafkaNodePoolStorageVolumesKraftMetadata { + #[serde(rename = "shared")] + Shared, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolStorageVolumesOverrides { + /// Id of the kafka broker (broker identifier). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub broker: Option, + /// The storage class to use for dynamic volume allocation for this broker. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub class: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum KafkaNodePoolStorageVolumesType { + #[serde(rename = "ephemeral")] + Ephemeral, + #[serde(rename = "persistent-claim")] + PersistentClaim, +} + +/// Template for pool resources. The template allows users to specify how the resources belonging to this pool are generated. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplate { + /// Template for the Kafka init container. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "initContainer")] + pub init_container: Option, + /// Template for the Kafka broker container. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "kafkaContainer")] + pub kafka_container: Option, + /// Template for Kafka per-pod `Ingress` used for access from outside of Kubernetes. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "perPodIngress")] + pub per_pod_ingress: Option, + /// Template for Kafka per-pod `Routes` used for access from outside of OpenShift. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "perPodRoute")] + pub per_pod_route: Option, + /// Template for Kafka per-pod `Services` used for access from outside of Kubernetes. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "perPodService")] + pub per_pod_service: Option, + /// Template for all Kafka `PersistentVolumeClaims`. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "persistentVolumeClaim")] + pub persistent_volume_claim: Option, + /// Template for Kafka `Pods`. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub pod: Option, + /// Template for Kafka `StrimziPodSet` resource. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "podSet")] + pub pod_set: Option, +} + +/// Template for the Kafka init container. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateInitContainer { + /// Environment variables which should be applied to the container. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub env: Option>, + /// Security context for the container. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityContext")] + pub security_context: Option, + /// Additional volume mounts which should be applied to the container. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeMounts")] + pub volume_mounts: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateInitContainerEnv { + /// The environment variable key. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// The environment variable value. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +/// Security context for the container. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateInitContainerSecurityContext { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowPrivilegeEscalation")] + pub allow_privilege_escalation: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "appArmorProfile")] + pub app_armor_profile: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub capabilities: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub privileged: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "procMount")] + pub proc_mount: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnlyRootFilesystem")] + pub read_only_root_filesystem: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsGroup")] + pub run_as_group: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsNonRoot")] + pub run_as_non_root: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUser")] + pub run_as_user: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "seLinuxOptions")] + pub se_linux_options: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "seccompProfile")] + pub seccomp_profile: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "windowsOptions")] + pub windows_options: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateInitContainerSecurityContextAppArmorProfile { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] + pub localhost_profile: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateInitContainerSecurityContextCapabilities { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub add: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub drop: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateInitContainerSecurityContextSeLinuxOptions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub level: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub role: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub user: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateInitContainerSecurityContextSeccompProfile { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] + pub localhost_profile: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateInitContainerSecurityContextWindowsOptions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpec")] + pub gmsa_credential_spec: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")] + pub gmsa_credential_spec_name: Option, + #[serde(default, skip_serializing_if = "Option::is_none", 
rename = "hostProcess")] + pub host_process: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUserName")] + pub run_as_user_name: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateInitContainerVolumeMounts { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mountPath")] + pub mount_path: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mountPropagation")] + pub mount_propagation: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] + pub read_only: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "recursiveReadOnly")] + pub recursive_read_only: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPath")] + pub sub_path: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPathExpr")] + pub sub_path_expr: Option, +} + +/// Template for the Kafka broker container. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateKafkaContainer { + /// Environment variables which should be applied to the container. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub env: Option>, + /// Security context for the container. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityContext")] + pub security_context: Option, + /// Additional volume mounts which should be applied to the container. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeMounts")] + pub volume_mounts: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateKafkaContainerEnv { + /// The environment variable key. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// The environment variable value. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +/// Security context for the container. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateKafkaContainerSecurityContext { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowPrivilegeEscalation")] + pub allow_privilege_escalation: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "appArmorProfile")] + pub app_armor_profile: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub capabilities: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub privileged: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "procMount")] + pub proc_mount: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnlyRootFilesystem")] + pub read_only_root_filesystem: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsGroup")] + pub run_as_group: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsNonRoot")] + pub run_as_non_root: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUser")] + pub run_as_user: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "seLinuxOptions")] + pub se_linux_options: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "seccompProfile")] + pub seccomp_profile: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "windowsOptions")] + pub windows_options: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateKafkaContainerSecurityContextAppArmorProfile { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] + pub localhost_profile: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateKafkaContainerSecurityContextCapabilities { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub add: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub drop: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateKafkaContainerSecurityContextSeLinuxOptions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub level: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub role: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub user: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateKafkaContainerSecurityContextSeccompProfile { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] + pub localhost_profile: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateKafkaContainerSecurityContextWindowsOptions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpec")] + pub gmsa_credential_spec: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")] + pub gmsa_credential_spec_name: Option, + #[serde(default, skip_serializing_if = 
"Option::is_none", rename = "hostProcess")] + pub host_process: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUserName")] + pub run_as_user_name: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplateKafkaContainerVolumeMounts { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mountPath")] + pub mount_path: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mountPropagation")] + pub mount_propagation: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] + pub read_only: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "recursiveReadOnly")] + pub recursive_read_only: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPath")] + pub sub_path: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPathExpr")] + pub sub_path_expr: Option, +} + +/// Template for Kafka per-pod `Ingress` used for access from outside of Kubernetes. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePerPodIngress { + /// Metadata applied to the resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metadata: Option, +} + +/// Metadata applied to the resource. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePerPodIngressMetadata { + /// Annotations added to the Kubernetes resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// Labels added to the Kubernetes resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, +} + +/// Template for Kafka per-pod `Routes` used for access from outside of OpenShift. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePerPodRoute { + /// Metadata applied to the resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metadata: Option, +} + +/// Metadata applied to the resource. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePerPodRouteMetadata { + /// Annotations added to the Kubernetes resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// Labels added to the Kubernetes resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, +} + +/// Template for Kafka per-pod `Services` used for access from outside of Kubernetes. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePerPodService { + /// Metadata applied to the resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metadata: Option, +} + +/// Metadata applied to the resource. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePerPodServiceMetadata { + /// Annotations added to the Kubernetes resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// Labels added to the Kubernetes resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, +} + +/// Template for all Kafka `PersistentVolumeClaims`. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePersistentVolumeClaim { + /// Metadata applied to the resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metadata: Option, +} + +/// Metadata applied to the resource. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePersistentVolumeClaimMetadata { + /// Annotations added to the Kubernetes resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// Labels added to the Kubernetes resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, +} + +/// Template for Kafka `Pods`. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePod { + /// The pod's affinity rules. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub affinity: Option, + /// Indicates whether information about services should be injected into Pod's environment variables. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableServiceLinks")] + pub enable_service_links: Option, + /// The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostAliases")] + pub host_aliases: Option>, + /// List of references to secrets in the same namespace to use for pulling any of the images used by this Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` environment variable in Cluster Operator and the `imagePullSecrets` option are specified, only the `imagePullSecrets` variable is used and the `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "imagePullSecrets")] + pub image_pull_secrets: Option>, + /// Metadata applied to the resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// The name of the priority class used to assign priority to the pods. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "priorityClassName")] + pub priority_class_name: Option, + /// The name of the scheduler used to dispatch this `Pod`. If not specified, the default scheduler will be used. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "schedulerName")] + pub scheduler_name: Option, + /// Configures pod-level security attributes and common container settings. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityContext")] + pub security_context: Option, + /// The grace period is the duration in seconds after the processes running in the pod are sent a termination signal, and the time when the processes are forcibly halted with a kill signal. Set this value to longer than the expected cleanup time for your process. Value must be a non-negative integer. A zero value indicates delete immediately. You might need to increase the grace period for very large Kafka clusters, so that the Kafka brokers have enough time to transfer their work to another broker before they are terminated. Defaults to 30 seconds. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] + pub termination_grace_period_seconds: Option, + /// Defines the total amount of pod memory allocated for the temporary `EmptyDir` volume `/tmp`. 
Specify the allocation in memory units, for example, `100Mi` for 100 mebibytes. Default value is `5Mi`. The `/tmp` volume is backed by pod memory, not disk storage, so avoid setting a high value as it consumes pod memory resources. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tmpDirSizeLimit")] + pub tmp_dir_size_limit: Option, + /// The pod's tolerations. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, + /// The pod's topology spread constraints. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "topologySpreadConstraints")] + pub topology_spread_constraints: Option>, + /// Additional volumes that can be mounted to the pod. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub volumes: Option>, +} + +/// The pod's affinity rules. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinity { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeAffinity")] + pub node_affinity: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "podAffinity")] + pub pod_affinity: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "podAntiAffinity")] + pub pod_anti_affinity: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityNodeAffinity { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] + pub preferred_during_scheduling_ignored_during_execution: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] + pub required_during_scheduling_ignored_during_execution: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub preference: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub weight: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchFields")] + pub match_fields: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, 
Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeSelectorTerms")] + pub node_selector_terms: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchFields")] + pub match_fields: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAffinity { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] + pub preferred_during_scheduling_ignored_during_execution: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] + pub required_during_scheduling_ignored_during_execution: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "podAffinityTerm")] + pub pod_affinity_term: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub weight: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] + pub match_label_keys: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] + pub mismatch_label_keys: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespaces: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "topologyKey")] + pub topology_key: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct 
KafkaNodePoolTemplatePodAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelector { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelectorMatchExpressions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] + pub match_label_keys: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] + pub mismatch_label_keys: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespaces: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "topologyKey")] + pub topology_key: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Serialize, Deserialize, 
Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelector { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelectorMatchExpressions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAntiAffinity { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] + pub preferred_during_scheduling_ignored_during_execution: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] + pub required_during_scheduling_ignored_during_execution: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "podAffinityTerm")] + pub pod_affinity_term: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub weight: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] + pub match_label_keys: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] + pub mismatch_label_keys: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespaces: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "topologyKey")] + pub topology_key: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub 
values: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelector { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelectorMatchExpressions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] + pub match_label_keys: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] + pub mismatch_label_keys: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespaces: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "topologyKey")] + pub topology_key: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelector { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelectorMatchExpressions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + #[serde(default, 
skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodHostAliases { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hostnames: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ip: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodImagePullSecrets { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +/// Metadata applied to the resource. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodMetadata { + /// Annotations added to the Kubernetes resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// Labels added to the Kubernetes resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, +} + +/// Configures pod-level security attributes and common container settings. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodSecurityContext { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "appArmorProfile")] + pub app_armor_profile: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsGroup")] + pub fs_group: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsGroupChangePolicy")] + pub fs_group_change_policy: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsGroup")] + pub run_as_group: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsNonRoot")] + pub run_as_non_root: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUser")] + pub run_as_user: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "seLinuxOptions")] + pub se_linux_options: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "seccompProfile")] + pub seccomp_profile: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "supplementalGroups")] + pub supplemental_groups: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub sysctls: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "windowsOptions")] + pub windows_options: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodSecurityContextAppArmorProfile { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] + pub localhost_profile: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodSecurityContextSeLinuxOptions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub level: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub role: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub user: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodSecurityContextSeccompProfile { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] 
+ pub localhost_profile: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodSecurityContextSysctls { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodSecurityContextWindowsOptions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpec")] + pub gmsa_credential_spec: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")] + pub gmsa_credential_spec_name: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostProcess")] + pub host_process: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUserName")] + pub run_as_user_name: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodTolerations { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub effect: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tolerationSeconds")] + pub toleration_seconds: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodTopologySpreadConstraints { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] + pub match_label_keys: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxSkew")] + pub max_skew: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "minDomains")] + pub min_domains: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeAffinityPolicy")] + pub node_affinity_policy: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeTaintsPolicy")] + pub node_taints_policy: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "topologyKey")] + pub topology_key: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "whenUnsatisfiable")] + pub when_unsatisfiable: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodTopologySpreadConstraintsLabelSelector { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KafkaNodePoolTemplatePodTopologySpreadConstraintsLabelSelectorMatchExpressions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct KafkaNodePoolTemplatePodVolumes {
+    /// ConfigMap to use to populate the volume.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "configMap")]
+    pub config_map: Option<KafkaNodePoolTemplatePodVolumesConfigMap>,
+    /// EmptyDir to use to populate the volume.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "emptyDir")]
+    pub empty_dir: Option<KafkaNodePoolTemplatePodVolumesEmptyDir>,
+    /// Name to use for the volume. Required.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    /// PersistentVolumeClaim object to use to populate the volume.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "persistentVolumeClaim")]
+    pub persistent_volume_claim: Option<KafkaNodePoolTemplatePodVolumesPersistentVolumeClaim>,
+    /// Secret to use to populate the volume.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub secret: Option<KafkaNodePoolTemplatePodVolumesSecret>,
+}
+
+/// ConfigMap to use to populate the volume.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct KafkaNodePoolTemplatePodVolumesConfigMap {
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultMode")]
+    pub default_mode: Option<i64>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub items: Option<Vec<KafkaNodePoolTemplatePodVolumesConfigMapItems>>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub optional: Option<bool>,
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct KafkaNodePoolTemplatePodVolumesConfigMapItems {
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub key: Option<String>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub mode: Option<i64>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub path: Option<String>,
+}
+
+/// EmptyDir to use to populate the volume.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct KafkaNodePoolTemplatePodVolumesEmptyDir {
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub medium: Option<String>,
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "sizeLimit")]
+    pub size_limit: Option<KafkaNodePoolTemplatePodVolumesEmptyDirSizeLimit>,
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct KafkaNodePoolTemplatePodVolumesEmptyDirSizeLimit {
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub amount: Option<String>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub format: Option<String>,
+}
+
+/// PersistentVolumeClaim object to use to populate the volume.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct KafkaNodePoolTemplatePodVolumesPersistentVolumeClaim {
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "claimName")]
+    pub claim_name: Option<String>,
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")]
+    pub read_only: Option<bool>,
+}
+
+/// Secret to use to populate the volume.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct KafkaNodePoolTemplatePodVolumesSecret {
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultMode")]
+    pub default_mode: Option<i64>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub items: Option<Vec<KafkaNodePoolTemplatePodVolumesSecretItems>>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub optional: Option<bool>,
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretName")]
+    pub secret_name: Option<String>,
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct KafkaNodePoolTemplatePodVolumesSecretItems {
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub key: Option<String>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub mode: Option<i64>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub path: Option<String>,
+}
+
+/// Template for Kafka `StrimziPodSet` resource.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct KafkaNodePoolTemplatePodSet {
+    /// Metadata applied to the resource.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub metadata: Option<KafkaNodePoolTemplatePodSetMetadata>,
+}
+
+/// Metadata applied to the resource.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct KafkaNodePoolTemplatePodSetMetadata {
+    /// Annotations added to the Kubernetes resource.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub annotations: Option<BTreeMap<String, String>>,
+    /// Labels added to the Kubernetes resource.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub labels: Option<BTreeMap<String, String>>,
+}
+
+/// The status of the KafkaNodePool.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct KafkaNodePoolStatus {
+    /// Kafka cluster ID.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "clusterId")]
+    pub cluster_id: Option<String>,
+    /// List of status conditions.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub conditions: Option<Vec<Condition>>,
+    /// Label selector for pods providing this resource.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")]
+    pub label_selector: Option<String>,
+    /// Node IDs used by Kafka nodes in this pool.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeIds")]
+    pub node_ids: Option<Vec<i64>>,
+    /// The generation of the CRD that was last reconciled by the operator.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")]
+    pub observed_generation: Option<i64>,
+    /// The current number of pods being used to provide this resource.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub replicas: Option<i64>,
+    /// The roles currently assigned to this pool.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub roles: Option<Vec<String>>,
+}
+
diff --git a/kube-custom-resources-rs/src/kafka_strimzi_io/v1beta2/mod.rs b/kube-custom-resources-rs/src/kafka_strimzi_io/v1beta2/mod.rs
index 6c4f6dc77..a8be02b48 100644
--- a/kube-custom-resources-rs/src/kafka_strimzi_io/v1beta2/mod.rs
+++ b/kube-custom-resources-rs/src/kafka_strimzi_io/v1beta2/mod.rs
@@ -2,6 +2,7 @@ pub mod kafkabridges;
 pub mod kafkaconnectors;
 pub mod kafkaconnects;
 pub mod kafkamirrormakers;
+pub mod kafkanodepools;
 pub mod kafkarebalances;
 pub mod kafkas;
 pub mod kafkatopics;
diff --git a/kube-custom-resources-rs/src/lib.rs b/kube-custom-resources-rs/src/lib.rs
index 46852d7c4..53a8d05be 100644
--- a/kube-custom-resources-rs/src/lib.rs
+++ b/kube-custom-resources-rs/src/lib.rs
@@ -1676,6 +1676,7 @@ apiVersion `kafka.strimzi.io/v1beta2`:
 - `KafkaConnector`
 - `KafkaConnect`
 - `KafkaMirrorMaker`
+- `KafkaNodePool`
 - `KafkaRebalance`
 - `Kafka`
 - `KafkaTopic`
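
For reference, a minimal sketch of how the new KafkaNodePool binding might be consumed with kube-rs once this change lands. It is not part of the diff above; it assumes a recent kube and tokio setup, an example namespace named "kafka", and that the cargo feature exposing the kafka_strimzi_io group of this crate is enabled. The module path follows from the mod.rs hunk above; the status fields mirror the generated KafkaNodePoolStatus struct.

// Hypothetical usage sketch, not generated code from this PR.
use kube::{api::ListParams, Api, Client, ResourceExt};
use kube_custom_resources_rs::kafka_strimzi_io::v1beta2::kafkanodepools::KafkaNodePool;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Connect using the local kubeconfig or in-cluster configuration.
    let client = Client::try_default().await?;

    // KafkaNodePool is namespaced; "kafka" is an assumed example namespace.
    let pools: Api<KafkaNodePool> = Api::namespaced(client, "kafka");

    for pool in pools.list(&ListParams::default()).await? {
        // Read back the status fields surfaced by the operator (roles, node IDs, replicas).
        let status = pool.status.clone().unwrap_or_default();
        println!(
            "{}: roles={:?} nodeIds={:?} replicas={:?}",
            pool.name_any(),
            status.roles,
            status.node_ids,
            status.replicas,
        );
    }
    Ok(())
}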