diff --git a/code-generator/create-custom-resources.sh b/code-generator/create-custom-resources.sh index 565732f88..3563a02ae 100755 --- a/code-generator/create-custom-resources.sh +++ b/code-generator/create-custom-resources.sh @@ -16,22 +16,23 @@ for file in ./crd-catalog/**/*.yaml; do path="${file%.*}" ignore_file="${path}.ignore" + args_file="${path}.args" + crd=$(basename "${path}") + version=$(basename "$(dirname "${file}")") + group=$(basename "$(dirname "$(dirname "${file}")")") + resource_filename=$(echo "${crd}" | sed -e 's/\./_/g' -e 's/-/_/g') + cargo_group=$(echo "${group}" | sed -e 's/\./_/g' -e 's/-/_/g') + cargo_feature="${cargo_group}" + feature_directory="./kube-custom-resources-rs/src/${cargo_feature}" + version_directory="${feature_directory}/${version}" - if [ ! -f "${ignore_file}" ]; then - args="${path}.args" - crd=$(basename "${path}") - version=$(basename "$(dirname "${file}")") - group=$(basename "$(dirname "$(dirname "${file}")")") - resource_filename=$(echo "${crd}" | sed -e 's/\./_/g' -e 's/-/_/g') - cargo_group=$(echo "${group}" | sed -e 's/\./_/g' -e 's/-/_/g') - cargo_feature="${cargo_group}" - feature_directory="./kube-custom-resources-rs/src/${cargo_feature}" - version_directory="${feature_directory}/${version}" + if [ -f "${ignore_file}" ]; then + rm --force "${version_directory}/${resource_filename}.rs" + else + mkdir --parents "${version_directory}" - mkdir --parents "${feature_directory}/${version}" - - if [ -f "${args}" ]; then - if ! xargs --arg-file="${args}" --delimiter='\n' kopium --docs --filename="${file}" > "${version_directory}/${resource_filename}.rs"; then + if [ -f "${args_file}" ]; then + if ! 
xargs --arg-file="${args_file}" --delimiter='\n' kopium --docs --filename="${file}" > "${version_directory}/${resource_filename}.rs"; then echo " error in ${file}" fi else diff --git a/code-generator/generate.sh b/code-generator/generate.sh index c94c02e9c..ce856fb93 100755 --- a/code-generator/generate.sh +++ b/code-generator/generate.sh @@ -12,16 +12,7 @@ cargo run --package code-generator --bin crd_v1_fetcher "${FILTER}" cargo run --package code-generator --bin dep5_generator # fix YAMLs -shopt -s globstar nullglob -for file in ./crd-catalog/**/fixup.sh; do - if [ -n "${FILTER}" ]; then - if ! echo -n "${file}" | grep --quiet "${FILTER}"; then - continue - fi - fi - - "${file}" -done +./code-generator/run-fixups.sh "${FILTER}" # generate Rust code ./code-generator/create-custom-resources.sh "${FILTER}" diff --git a/code-generator/run-fixups.sh b/code-generator/run-fixups.sh new file mode 100755 index 000000000..471f22e7e --- /dev/null +++ b/code-generator/run-fixups.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +# SPDX-FileCopyrightText: The kube-custom-resources-rs Authors +# SPDX-License-Identifier: 0BSD + +FILTER="${1:-}" + +# fix YAMLs +shopt -s globstar nullglob +for file in ./crd-catalog/**/fixup.sh; do + if [ -n "${FILTER}" ]; then + if ! 
echo -n "${file}" | grep --quiet "${FILTER}"; then + continue + fi + fi + + "${file}" +done diff --git a/code-generator/src/catalog.rs b/code-generator/src/catalog.rs index a0a032af7..36e6f7670 100644 --- a/code-generator/src/catalog.rs +++ b/code-generator/src/catalog.rs @@ -1761,12 +1761,91 @@ pub const CRD_V1_SOURCES: &'static [UpstreamSource] = &[ project_name: "openshift/api", license: APACHE_V2, urls: &[ + "https://github.com/openshift/api/blob/master/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml", + "https://github.com/openshift/api/blob/master/cloudnetwork/v1/001-cloudprivateipconfig.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_03_config-operator_01_proxy.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_03_marketplace-operator_01_operatorhub.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_authentication.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_console.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_dns-Default.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_featuregate.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_image.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_imagedigestmirrorset.crd.yaml", + 
"https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_imagetagmirrorset.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_ingress.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_network.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_node.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_oauth.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_project.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_config-operator_01_scheduler.crd.yaml", + "https://github.com/openshift/api/blob/master/config/v1/0000_10_openshift-controller-manager-operator_01_build.crd.yaml", + "https://github.com/openshift/api/blob/master/console/v1/00_consoleclidownload.crd.yaml", + "https://github.com/openshift/api/blob/master/console/v1/00_consoleexternalloglink.crd.yaml", + "https://github.com/openshift/api/blob/master/console/v1/00_consolelink.crd.yaml", + "https://github.com/openshift/api/blob/master/console/v1/00_consolenotification.crd.yaml", + "https://github.com/openshift/api/blob/master/console/v1/00_consolequickstart.crd.yaml", + "https://github.com/openshift/api/blob/master/console/v1/00_consolesample.crd.yaml", + "https://github.com/openshift/api/blob/master/console/v1/00_consoleyamlsample.crd.yaml", + "https://github.com/openshift/api/blob/master/console/v1/90_consoleplugin.crd.yaml", + "https://github.com/openshift/api/blob/master/example/v1/0000_50_stabletype-Default.crd.yaml", + "https://github.com/openshift/api/blob/master/helm/v1beta1/00_helm-chart-repository.crd.yaml", + "https://github.com/openshift/api/blob/master/helm/v1beta1/00_project-helm-chart-repository.crd.yaml", + 
"https://github.com/openshift/api/blob/master/imageregistry/v1/00_imageregistry.crd.yaml", + "https://github.com/openshift/api/blob/master/imageregistry/v1/01_imagepruner.crd.yaml", + "https://github.com/openshift/api/blob/master/insights/v1alpha1/0000_10_01_datagather.crd.yaml", + "https://github.com/openshift/api/blob/master/machine/v1/0000_10_controlplanemachineset-Default.crd.yaml", + "https://github.com/openshift/api/blob/master/machine/v1beta1/0000_10_machine.crd.yaml", + "https://github.com/openshift/api/blob/master/machine/v1beta1/0000_10_machinehealthcheck.yaml", + "https://github.com/openshift/api/blob/master/machine/v1beta1/0000_10_machineset.crd.yaml", "https://github.com/openshift/api/blob/master/machineconfiguration/v1/0000_80_containerruntimeconfig.crd.yaml", "https://github.com/openshift/api/blob/master/machineconfiguration/v1/0000_80_controllerconfig.crd.yaml", "https://github.com/openshift/api/blob/master/machineconfiguration/v1/0000_80_kubeletconfig.crd.yaml", "https://github.com/openshift/api/blob/master/machineconfiguration/v1/0000_80_machineconfig.crd.yaml", "https://github.com/openshift/api/blob/master/machineconfiguration/v1/0000_80_machineconfigpool.crd.yaml", "https://github.com/openshift/api/blob/master/machineconfiguration/v1alpha1/0000_80_machineconfignode-CustomNoUpgrade.crd.yaml", + "https://github.com/openshift/api/blob/master/monitoring/v1/0000_50_monitoring_01_alertingrules.crd.yaml", + "https://github.com/openshift/api/blob/master/monitoring/v1/0000_50_monitoring_02_alertrelabelconfigs.crd.yaml", + "https://github.com/openshift/api/blob/master/network/v1/001-clusternetwork-crd.yaml", + "https://github.com/openshift/api/blob/master/network/v1/002-hostsubnet-crd.yaml", + "https://github.com/openshift/api/blob/master/network/v1/003-netnamespace-crd.yaml", + "https://github.com/openshift/api/blob/master/network/v1/004-egressnetworkpolicy-crd.yaml", + 
"https://github.com/openshift/api/blob/master/networkoperator/v1/001-egressrouter.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_10_config-operator_01_config.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_12_etcd-operator_01_config.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_50_insights-operator_00-insightsoperator.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_50_service-ca-operator_02_crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_70_cluster-network-operator_01.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_70_dns-operator_00.crd.yaml", + 
"https://github.com/openshift/api/blob/master/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_80_machine-config-operator_01_config.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml", + "https://github.com/openshift/api/blob/master/operator/v1/00_console-operator.crd.yaml", + "https://github.com/openshift/api/blob/master/operatorcontrolplane/v1alpha1/0000_10-pod-network-connectivity-check.crd.yaml", + "https://github.com/openshift/api/blob/master/operatoringress/v1/0000_50_dns-record.yaml", + "https://github.com/openshift/api/blob/master/platform/v1alpha1/platformoperators.crd.yaml", + "https://github.com/openshift/api/blob/master/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml", + "https://github.com/openshift/api/blob/master/route/v1/route.crd.yaml", + "https://github.com/openshift/api/blob/master/samples/v1/00_samplesconfig.crd.yaml", + "https://github.com/openshift/api/blob/master/security/v1/0000_03_security-openshift_01_scc.crd.yaml", + "https://github.com/openshift/api/blob/master/securityinternal/v1/0000_03_securityinternal-openshift_02_rangeallocation.crd.yaml", + "https://github.com/openshift/api/blob/master/securityinternal/v1/0000_03_securityinternal-openshift_02_rangeallocation.crd.yaml-patch", + "https://github.com/openshift/api/blob/master/sharedresource/v1alpha1/0000_10_sharedconfigmap.crd.yaml", + "https://github.com/openshift/api/blob/master/sharedresource/v1alpha1/0000_10_sharedsecret.crd.yaml", ], }, UpstreamSource { diff --git a/crd-catalog/openshift/api/authorization.openshift.io/v1/rolebindingrestrictions.yaml b/crd-catalog/openshift/api/authorization.openshift.io/v1/rolebindingrestrictions.yaml new file mode 100644 index 000000000..9a9621689 --- /dev/null +++ b/crd-catalog/openshift/api/authorization.openshift.io/v1/rolebindingrestrictions.yaml @@ -0,0 +1,158 @@ +apiVersion: 
apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: rolebindingrestrictions.authorization.openshift.io +spec: + group: authorization.openshift.io + names: + kind: RoleBindingRestriction + listKind: RoleBindingRestrictionList + plural: rolebindingrestrictions + singular: rolebindingrestriction + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "RoleBindingRestriction is an object that can be matched against a subject (user, group, or service account) to determine whether rolebindings on that subject are allowed in the namespace to which the RoleBindingRestriction belongs. If any one of those RoleBindingRestriction objects matches a subject, rolebindings on that subject in the namespace are allowed. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the matcher. 
+ properties: + grouprestriction: + description: GroupRestriction matches against group subjects. + nullable: true + properties: + groups: + description: Groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role. + items: + type: string + nullable: true + type: array + labels: + description: Selectors specifies a list of label selectors over group labels. + items: + description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + nullable: true + type: array + type: object + serviceaccountrestriction: + description: ServiceAccountRestriction matches against service-account subjects. + nullable: true + properties: + namespaces: + description: Namespaces specifies a list of literal namespace names. + items: + type: string + type: array + serviceaccounts: + description: ServiceAccounts specifies a list of literal service-account names. + items: + description: ServiceAccountReference specifies a service account and namespace by their names. + properties: + name: + description: Name is the name of the service account. + type: string + namespace: + description: Namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used. + type: string + type: object + type: array + type: object + userrestriction: + description: UserRestriction matches against user subjects. + nullable: true + properties: + groups: + description: Groups specifies a list of literal group names. + items: + type: string + nullable: true + type: array + labels: + description: Selectors specifies a list of label selectors over user labels. + items: + description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nullable: true + type: array + users: + description: Users specifies a list of literal user names. 
+ items: + type: string + type: array + type: object + type: object + type: object + served: true + storage: true diff --git a/crd-catalog/openshift/api/cloud.network.openshift.io/v1/cloudprivateipconfigs.args b/crd-catalog/openshift/api/cloud.network.openshift.io/v1/cloudprivateipconfigs.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/cloud.network.openshift.io/v1/cloudprivateipconfigs.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/cloud.network.openshift.io/v1/cloudprivateipconfigs.yaml b/crd-catalog/openshift/api/cloud.network.openshift.io/v1/cloudprivateipconfigs.yaml new file mode 100644 index 000000000..6e3672a1b --- /dev/null +++ b/crd-catalog/openshift/api/cloud.network.openshift.io/v1/cloudprivateipconfigs.yaml @@ -0,0 +1,107 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/859 + name: cloudprivateipconfigs.cloud.network.openshift.io +spec: + group: cloud.network.openshift.io + names: + kind: CloudPrivateIPConfig + listKind: CloudPrivateIPConfigList + plural: cloudprivateipconfigs + singular: cloudprivateipconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "CloudPrivateIPConfig performs an assignment of a private IP address to the primary NIC associated with cloud VMs. This is done by specifying the IP and Kubernetes node which the IP should be assigned to. This CRD is intended to be used by the network plugin which manages the cluster network. The spec side represents the desired state requested by the network plugin, and the status side represents the current state that this CRD's controller has executed. No users will have permission to modify it, and if a cluster-admin decides to edit it for some reason, their changes will be overwritten the next time the network plugin reconciles the object. 
Note: the CR's name must specify the requested private IP address (can be IPv4 or IPv6). \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + properties: + name: + anyOf: + - format: ipv4 + - format: ipv6 + type: string + type: object + spec: + description: spec is the definition of the desired private IP request. + properties: + node: + description: 'node is the node name, as specified by the Kubernetes field: node.metadata.name' + type: string + type: object + status: + description: status is the observed status of the desired private IP request. Read-only. + properties: + conditions: + description: condition is the assignment condition of the private IP and its status + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + node: + description: 'node is the node name, as specified by the Kubernetes field: node.metadata.name' + type: string + required: + - conditions + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/apiservers.args b/crd-catalog/openshift/api/config.openshift.io/v1/apiservers.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/apiservers.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/apiservers.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/apiservers.yaml new file mode 100644 index 000000000..74cc6df2e --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/apiservers.yaml @@ -0,0 +1,179 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/feature-set: Default + name: apiservers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: APIServer + listKind: 
APIServerList + plural: apiservers + singular: apiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + additionalCORSAllowedOrigins: + description: additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language. + items: + type: string + type: array + audit: + default: + profile: Default + description: audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster. + properties: + customRules: + description: customRules specify profiles per group. 
These profile take precedence over the top-level profile field if they apply. They are evaluation from top to bottom and the first one that matches, applies. + items: + description: AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile. + properties: + group: + description: group is a name of group a request user must be member of in order to this profile to apply. + minLength: 1 + type: string + profile: + description: "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster. \n The following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n If unset, the 'Default' profile is used as the default." + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + - None + type: string + required: + - group + - profile + type: object + type: array + x-kubernetes-list-map-keys: + - group + x-kubernetes-list-type: map + profile: + default: Default + description: "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules. \n The following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody level). 
- WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n Warning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly. \n If unset, the 'Default' profile is used as the default." + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + - None + type: string + type: object + clientCA: + description: 'clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - CA bundle.' + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + encryption: + description: encryption allows the configuration of encryption of resources at the datastore layer. + properties: + type: + description: "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. 
Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices. \n When encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io" + enum: + - '' + - identity + - aescbc + - aesgcm + type: string + type: object + servingCerts: + description: servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic. + properties: + namedCertificates: + description: namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used. + items: + description: APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate. + properties: + names: + description: names is a optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names. + items: + type: string + type: array + servingCertificate: + description: 'servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - TLS certificate.' 
+ properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + type: object + type: array + type: object + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. \n If unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12." + properties: + custom: + description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1" + nullable: true + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + items: + type: string + type: array + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. 
For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + type: string + type: object + intermediate: + description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2" + nullable: true + type: object + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." 
+ nullable: true + type: object + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + nullable: true + type: object + type: + description: "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations \n The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced. \n Note that the Modern profile is currently not supported because it is not yet well adopted by common software libraries." + enum: + - Old + - Intermediate + - Modern + - Custom + type: string + type: object + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. 
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/authentications.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/authentications.yaml new file mode 100644 index 000000000..44e09f96e --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/authentications.yaml @@ -0,0 +1,103 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/feature-set: Default + name: authentications.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Authentication + listKind: AuthenticationList + plural: authentications + singular: authentication + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + oauthMetadata: + description: 'oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key "oauthMetadata" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.' + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + serviceAccountIssuer: + description: 'serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. This allows internal components to transition to use new service account issuer without service distruption.' + type: string + type: + description: type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth. 
+ type: string + webhookTokenAuthenticator: + description: "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. \n Can only be set if \"Type\" is set to \"None\"." + properties: + kubeConfig: + description: "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config. \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication \n The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored." + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + required: + - kubeConfig + type: object + webhookTokenAuthenticators: + description: webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + items: + description: deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. + properties: + kubeConfig: + description: 'kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key "kubeConfig" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. 
The namespace for this secret is determined by the point of use.' + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + properties: + integratedOAuthMetadata: + description: 'integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key "oauthMetadata" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.' 
+ properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/builds.ignore b/crd-catalog/openshift/api/config.openshift.io/v1/builds.ignore new file mode 100644 index 000000000..b85a00322 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/builds.ignore @@ -0,0 +1 @@ +cannot find type `BuildStatus` in this scope diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/builds.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/builds.yaml new file mode 100644 index 000000000..129367891 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/builds.yaml @@ -0,0 +1,291 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + capability.openshift.io/name: Build + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: builds.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Build + listKind: BuildList + plural: builds + singular: build + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds. \n The canonical name is \"cluster\" \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec holds user-settable values for the build controller configuration + properties: + additionalTrustedCA: + description: "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config. \n DEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead." + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + buildDefaults: + description: BuildDefaults controls the default information for Builds + properties: + defaultProxy: + description: "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download. \n Values can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy." + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. 
Empty means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + items: + type: string + type: array + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well. \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate bundle. -----END CERTIFICATE-----" + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + type: object + env: + description: Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. 
+ type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + gitProxy: + description: "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone. \n Values that are not set here will be inherited from DefaultProxy." + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var. 
+ type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + items: + type: string + type: array + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well. \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate bundle. -----END CERTIFICATE-----" + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + type: object + imageLabels: + description: ImageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig. + items: + properties: + name: + description: Name defines the name of the label. It must have non-zero length. + type: string + value: + description: Value defines the literal value of the label. + type: string + type: object + type: array + resources: + description: Resources defines resource requirements to execute the build. + properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. 
\n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + buildOverrides: + description: BuildOverrides controls override settings for builds + properties: + forcePull: + description: ForcePull overrides, if set, the equivalent value in the builds, i.e. 
false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself + type: boolean + imageLabels: + description: ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten. + items: + properties: + name: + description: Name defines the name of the label. It must have non-zero length. + type: string + value: + description: Value defines the literal value of the label. + type: string + type: object + type: array + nodeSelector: + additionalProperties: + type: string + description: NodeSelector is a selector which must be true for the build pod to fit on a node + type: object + tolerations: + description: Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. 
By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/clusteroperators.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/clusteroperators.yaml new file mode 100644 index 000000000..9c81181d3 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/clusteroperators.yaml @@ -0,0 +1,137 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/497 + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: clusteroperators.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterOperator + listKind: ClusterOperatorList + plural: clusteroperators + shortNames: + - co + singular: clusteroperator + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The version the operator is at. + jsonPath: .status.versions[?(@.name=="operator")].version + name: Version + type: string + - description: Whether the operator is running and stable. + jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - description: Whether the operator is processing changes. + jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - description: Whether the operator is degraded. 
+ jsonPath: .status.conditions[?(@.type=="Degraded")].status + name: Degraded + type: string + - description: The time the operator's Available status last changed. + jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime + name: Since + type: date + name: v1 + schema: + openAPIV3Schema: + description: "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds configuration that could apply to any operator. + type: object + status: + description: status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem. + properties: + conditions: + description: conditions describes the state of the operator's managed and monitored components. + items: + description: ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components. + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update to the current status property. 
+ format: date-time + type: string + message: + description: message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + type: string + reason: + description: reason is the CamelCase reason for the condition's current status. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the aspect reported by this condition. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + extension: + description: extension contains any additional status information specific to the operator which owns this status object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + relatedObjects: + description: 'relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces' + items: + description: ObjectReference contains enough information to let you inspect or modify the referred object. + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. + type: string + required: + - group + - name + - resource + type: object + type: array + versions: + description: versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name "operator". An operator reports a new "operator" version when it has rolled out the new version to all of its operands. 
+ items: + properties: + name: + description: name is the name of the particular operand this version is for. It usually matches container images, not operators. + type: string + version: + description: version indicates which version of a particular operand is currently being managed. It must always match the Available operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout 1.1.0 + type: string + required: + - name + - version + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/clusterversions.args b/crd-catalog/openshift/api/config.openshift.io/v1/clusterversions.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/clusterversions.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/clusterversions.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/clusterversions.yaml new file mode 100644 index 000000000..7f1f5a3a8 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/clusterversions.yaml @@ -0,0 +1,457 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/495 + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: clusterversions.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterVersion + plural: clusterversions + singular: clusterversion + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.history[?(@.state=="Completed")].version + name: Version + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: 
.status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime + name: Since + type: date + - jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster. + properties: + capabilities: + description: capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics. + properties: + additionalEnabledCapabilities: + description: additionalEnabledCapabilities extends the set of managed capabilities beyond the baseline defined in baselineCapabilitySet. The default is an empty set. 
+ items: + description: ClusterVersionCapability enumerates optional, core cluster components. + enum: + - openshift-samples + - baremetal + - marketplace + - Console + - Insights + - Storage + - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig + - ImageRegistry + - OperatorLifecycleManager + type: string + type: array + x-kubernetes-list-type: atomic + baselineCapabilitySet: + description: baselineCapabilitySet selects an initial set of optional capabilities to enable, which can be extended via additionalEnabledCapabilities. If unset, the cluster will choose a default, and the default may change over time. The current default is vCurrent. + enum: + - None + - v4.11 + - v4.12 + - v4.13 + - v4.14 + - v4.15 + - vCurrent + type: string + type: object + channel: + description: channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters. + type: string + clusterID: + description: clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field. + type: string + desiredUpdate: + description: "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. \n Some of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. 3. 
image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error. \n If an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed." + properties: + architecture: + description: architecture is an optional field that indicates the desired value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. architecture can only be set to Multi thereby only allowing updates from single to multi architecture. If architecture is set, image cannot be set and version must be set. Valid values are 'Multi' and empty. + enum: + - Multi + - '' + type: string + force: + description: force allows an administrator to update to an image that has failed verification or upgradeable checks. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources. + type: boolean + image: + description: image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. 
When image is set, version is ignored. When image is set, version should be empty. When image is set, architecture cannot be specified. + type: string + version: + description: version is a semantic version identifying the update version. version is ignored if image is specified and required if architecture is specified. + type: string + type: object + x-kubernetes-validations: + - message: cannot set both Architecture and Image + rule: 'has(self.architecture) && has(self.image) ? (self.architecture == '''' || self.image == '''') : true' + - message: Version must be set if Architecture is set + rule: 'has(self.architecture) && self.architecture != '''' ? self.version != '''' : true' + overrides: + description: overrides is list of overides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object. + items: + description: ComponentOverride allows overriding cluster version operator's behavior for a component. + properties: + group: + description: group identifies the API group that the kind is in. + type: string + kind: + description: kind indentifies which object to override. + type: string + name: + description: name is the component's name. + type: string + namespace: + description: namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty. + type: string + unmanaged: + description: 'unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false' + type: boolean + required: + - group + - kind + - name + - namespace + - unmanaged + type: object + type: array + upstream: + description: upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region. 
+ type: string + required: + - clusterID + type: object + status: + description: status contains information about the available updates and any in-progress updates. + properties: + availableUpdates: + description: availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified. + items: + description: Release represents an OpenShift release image and associated metadata. + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + items: + type: string + type: array + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + type: object + nullable: true + type: array + capabilities: + description: capabilities describes the state of optional, core cluster components. + properties: + enabledCapabilities: + description: enabledCapabilities lists all the capabilities that are currently managed. + items: + description: ClusterVersionCapability enumerates optional, core cluster components. 
+ enum: + - openshift-samples + - baremetal + - marketplace + - Console + - Insights + - Storage + - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig + - ImageRegistry + - OperatorLifecycleManager + type: string + type: array + x-kubernetes-list-type: atomic + knownCapabilities: + description: knownCapabilities lists all the capabilities known to the current cluster. + items: + description: ClusterVersionCapability enumerates optional, core cluster components. + enum: + - openshift-samples + - baremetal + - marketplace + - Console + - Insights + - Storage + - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig + - ImageRegistry + - OperatorLifecycleManager + type: string + type: array + x-kubernetes-list-type: atomic + type: object + conditionalUpdates: + description: conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified. + items: + description: ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster. + properties: + conditions: + description: 'conditions represents the observations of the conditional update''s current status. Known types are: * Evaluating, for whether the cluster-version operator will attempt to evaluate any risks[].matchingRules. * Recommended, for whether the update is recommended for the current cluster.' + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. 
For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + release: + description: release is the target of the update. + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + items: + type: string + type: array + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + type: object + risks: + description: risks represents the range of issues associated with updating to the target release. The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update. + items: + description: ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update. 
+ properties: + matchingRules: + description: matchingRules is a slice of conditions for deciding which clusters match the risk and which do not. The slice is ordered by decreasing precedence. The cluster-version operator will walk the slice in order, and stop after the first it can successfully evaluate. If no condition can be successfully evaluated, the update will not be recommended. + items: + description: ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate. + properties: + promql: + description: promQL represents a cluster condition based on PromQL. + properties: + promql: + description: PromQL is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures. + type: string + required: + - promql + type: object + type: + description: type represents the cluster-condition type. This defines the members and semantics of any additional properties. + enum: + - Always + - PromQL + type: string + required: + - type + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + message: + description: message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + minLength: 1 + type: string + name: + description: name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state. + minLength: 1 + type: string + url: + description: url contains information about this risk. 
+ format: uri + minLength: 1 + type: string + required: + - matchingRules + - message + - name + - url + type: object + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - release + - risks + type: object + type: array + x-kubernetes-list-type: atomic + conditions: + description: conditions provides information about the cluster version. The condition "Available" is set to true if the desiredUpdate has been reached. The condition "Progressing" is set to true if an update is being applied. The condition "Degraded" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation. + items: + description: ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components. + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update to the current status property. + format: date-time + type: string + message: + description: message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + type: string + reason: + description: reason is the CamelCase reason for the condition's current status. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the aspect reported by this condition. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + desired: + description: desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag. 
+ properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + items: + type: string + type: array + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + type: object + history: + description: history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved. + items: + description: UpdateHistory is a single attempted update to the cluster. + properties: + acceptedRisks: + description: acceptedRisks records risks which were accepted to initiate the update. For example, it may menition an Upgradeable=False or missing signature that was overriden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets. + type: string + completionTime: + description: completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. 
Completion time will always be set for entries that are not the current update (usually to the started time of the next update). + format: date-time + nullable: true + type: string + image: + description: image is a container image location that contains the update. This value is always populated. + type: string + startedTime: + description: startedTime is the time at which the update was started. + format: date-time + type: string + state: + description: state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied). + type: string + verified: + description: verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted. + type: boolean + version: + description: version is a semantic version identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty. + type: string + required: + - completionTime + - image + - startedTime + - state + - verified + type: object + type: array + observedGeneration: + description: observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version. + format: int64 + type: integer + versionHash: + description: versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only. 
+ type: string + required: + - availableUpdates + - desired + - observedGeneration + - versionHash + type: object + required: + - spec + type: object + x-kubernetes-validations: + - message: the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability + rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''baremetal'' in self.spec.capabilities.additionalEnabledCapabilities ? ''MachineAPI'' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && ''MachineAPI'' in self.status.capabilities.enabledCapabilities) : true' + - message: the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability + rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''marketplace'' in self.spec.capabilities.additionalEnabledCapabilities ? 
''OperatorLifecycleManager'' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && ''OperatorLifecycleManager'' in self.status.capabilities.enabledCapabilities) : true' + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/consoles.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/consoles.yaml new file mode 100644 index 000000000..442f7d4a4 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/consoles.yaml @@ -0,0 +1,57 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: consoles.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Console + listKind: ConsoleList + plural: consoles + singular: console + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + authentication: + description: ConsoleAuthentication defines a list of optional configuration for console authentication. + properties: + logoutRedirect: + description: 'An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page. This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user''s token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.' + pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$ + type: string + type: object + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + properties: + consoleURL: + description: The URL for the console. This will be derived from the host for the route that is created for the console. 
+ type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/dnses.args b/crd-catalog/openshift/api/config.openshift.io/v1/dnses.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/dnses.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/dnses.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/dnses.yaml new file mode 100644 index 000000000..46c662541 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/dnses.yaml @@ -0,0 +1,114 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/feature-set: Default + name: dnses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "DNS holds cluster-wide information about DNS. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + baseDomain: + description: "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base. \n For example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`. \n Once set, this field cannot be changed." + type: string + platform: + description: platform holds configuration specific to the underlying infrastructure provider for DNS. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. + properties: + aws: + description: aws contains DNS configuration specific to the Amazon Web Services cloud provider. + properties: + privateZoneIAMRole: + description: privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed. + pattern: ^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$ + type: string + type: object + type: + description: "type is the underlying infrastructure provider for the cluster. Allowed values: \"\", \"AWS\". \n Individual components may not support all platforms, and must handle unrecognized platforms with best-effort defaults." 
+ enum: + - '' + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + x-kubernetes-validations: + - message: allowed values are '' and 'AWS' + rule: self in ['','AWS'] + required: + - type + type: object + x-kubernetes-validations: + - message: aws configuration is required when platform is AWS, and forbidden otherwise + rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) : !has(self.aws)' + privateZone: + description: "privateZone is the location where all the DNS records that are only available internally to the cluster exist. \n If this field is nil, no private records should be created. \n Once set, this field cannot be changed." + properties: + id: + description: "id is the identifier that can be used to find the DNS hosted zone. \n on AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + additionalProperties: + type: string + description: "tags can be used to query the DNS hosted zone. \n on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + type: object + publicZone: + description: "publicZone is the location where all the DNS records that are publicly accessible to the internet exist. \n If this field is nil, no public records should be created. \n Once set, this field cannot be changed." 
+ properties: + id: + description: "id is the identifier that can be used to find the DNS hosted zone. \n on AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + additionalProperties: + type: string + description: "tags can be used to query the DNS hosted zone. \n on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + type: object + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. 
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/featuregates.args b/crd-catalog/openshift/api/config.openshift.io/v1/featuregates.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/featuregates.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/featuregates.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/featuregates.yaml new file mode 100644 index 000000000..ef16e26e0 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/featuregates.yaml @@ -0,0 +1,153 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: featuregates.config.openshift.io +spec: + group: config.openshift.io + names: + kind: FeatureGate + listKind: FeatureGateList + plural: featuregates + singular: featuregate + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Feature holds cluster-wide information about feature gates. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + customNoUpgrade: + description: customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal "CustomNoUpgrade" must be set to use this field. + nullable: true + properties: + disabled: + description: disabled is a list of all feature gates that you want to force off + items: + description: FeatureGateName is a string to enforce patterns on the name of a FeatureGate + pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ + type: string + type: array + enabled: + description: enabled is a list of all feature gates that you want to force on + items: + description: FeatureGateName is a string to enforce patterns on the name of a FeatureGate + pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ + type: string + type: array + type: object + featureSet: + description: featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone. + type: string + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. 
+ properties: + conditions: + description: 'conditions represent the observations of the current state. Known .status.conditions.type are: "DeterminationDegraded"' + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + featureGates: + description: featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion. Operators other than the CVO and cluster-config-operator, must read the .status.featureGates, locate the version they are managing, find the enabled/disabled featuregates and make the operand and operator match. The enabled/disabled values for a particular version may change during the life of the cluster as various .spec.featureSet values are selected. Operators may choose to restart their processes to pick up these changes, but remembering past enable/disable lists is beyond the scope of this API and is the responsibility of individual operators. Only featureGates with .version in the ClusterVersion.status will be present in this list. + items: + properties: + disabled: + description: disabled is a list of all feature gates that are disabled in the cluster for the named version. + items: + properties: + name: + description: name is the name of the FeatureGate. 
+ pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ + type: string + required: + - name + type: object + type: array + enabled: + description: enabled is a list of all feature gates that are enabled in the cluster for the named version. + items: + properties: + name: + description: name is the name of the FeatureGate. + pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ + type: string + required: + - name + type: object + type: array + version: + description: version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field. + type: string + required: + - version + type: object + type: array + x-kubernetes-list-map-keys: + - version + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/imagecontentpolicies.ignore b/crd-catalog/openshift/api/config.openshift.io/v1/imagecontentpolicies.ignore new file mode 100644 index 000000000..fc386f659 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/imagecontentpolicies.ignore @@ -0,0 +1 @@ +cannot find type `ImageContentPolicyStatus` in this scope diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/imagecontentpolicies.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/imagecontentpolicies.yaml new file mode 100644 index 000000000..3b511a11b --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/imagecontentpolicies.yaml @@ -0,0 +1,68 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/874 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: imagecontentpolicies.config.openshift.io +spec: + group: config.openshift.io + names: + kind: 
ImageContentPolicy + listKind: ImageContentPolicyList + plural: imagecontentpolicies + singular: imagecontentpolicy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + repositoryDigestMirrors: + description: "repositoryDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in RepositoryDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To pull image from mirrors by tags, should set the \"allowMirrorByTags\". \n Each “source” repository is treated independently; configurations for different “source” repositories don’t interact. 
\n If the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec. \n When multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified." + items: + description: RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. + properties: + allowMirrorByTags: + description: allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false, the mirrors only work when pulling the images that are referenced by their digests. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Forcing digest-pulls for mirrors avoids that issue. + type: boolean + mirrors: + description: mirrors is zero or more repositories that may also contain the same images. If the "mirrors" is not specified, the image will continue to be pulled from the specified repository in the pull spec. No mirror will be configured. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. Other cluster configuration, including (but not limited to) other repositoryDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. 
+ items: + pattern: ^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$ + type: string + type: array + x-kubernetes-list-type: set + source: + description: source is the repository that users refer to, e.g. in image pull specifications. + pattern: ^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$ + type: string + required: + - source + type: object + type: array + x-kubernetes-list-map-keys: + - source + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/imagedigestmirrorsets.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/imagedigestmirrorsets.yaml new file mode 100644 index 000000000..deced22a6 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/imagedigestmirrorsets.yaml @@ -0,0 +1,74 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1126 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: imagedigestmirrorsets.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ImageDigestMirrorSet + listKind: ImageDigestMirrorSetList + plural: imagedigestmirrorsets + shortNames: + - idms + singular: imagedigestmirrorset + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ImageDigestMirrorSet holds cluster-wide information about how to handle registry mirror rules on using digest pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field. 
\n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + imageDigestMirrors: + description: "imageDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using tag specification, users should configure a list of mirrors using \"ImageTagMirrorSet\" CRD. \n If the image pull specification matches the repository of \"source\" in multiple imagedigestmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the \"source\", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. 
Each “source” repository is treated independently; configurations for different “source” repositories don’t interact. \n If the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec. \n When multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. Users who want to use a specific order of mirrors, should configure them into one list of mirrors using the expected order." + items: + description: ImageDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. + properties: + mirrorSourcePolicy: + description: mirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors. If unset, the image will continue to be pulled from the the repository in the pull spec. sourcePolicy is valid configuration only when one or more mirrors are in the mirror list. + enum: + - NeverContactSource + - AllowContactingSource + type: string + mirrors: + description: 'mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their digests. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. 
The order of mirrors in this list is treated as the user''s desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy" Other cluster configuration, including (but not limited to) other imageDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. "mirrors" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' + items: + pattern: ^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ + type: string + type: array + x-kubernetes-list-type: set + source: + description: 'source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname e.g. docker.io. quay.io, or registry.redhat.io, will match the image pull specification of corressponding registry. 
"source" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' + pattern: ^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ + type: string + required: + - source + type: object + type: array + x-kubernetes-list-type: atomic + type: object + status: + description: status contains the observed state of the resource. + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/images.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/images.yaml new file mode 100644 index 000000000..e07cdcaae --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/images.yaml @@ -0,0 +1,108 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: images.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Image + listKind: ImageList + plural: images + singular: image + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Image governs policies related to imagestream imports and runtime configuration for external registries. 
It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + additionalTrustedCA: + description: additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + allowedRegistriesForImport: + description: allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. 
Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions. + items: + description: RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'. + properties: + domainName: + description: domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well. + type: string + insecure: + description: insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure. + type: boolean + type: object + type: array + externalRegistryHostnames: + description: externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in "hostname[:port]" format. + items: + type: string + type: array + registrySources: + description: registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry. + properties: + allowedRegistries: + description: "allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied. \n Only one of BlockedRegistries or AllowedRegistries may be set." + items: + type: string + type: array + blockedRegistries: + description: "blockedRegistries cannot be used for image pull and push actions. All other registries are permitted. \n Only one of BlockedRegistries or AllowedRegistries may be set." 
+ items: + type: string + type: array + containerRuntimeSearchRegistries: + description: 'containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified domains in their pull specs. Registries will be searched in the order provided in the list. Note: this search list only works with the container runtime, i.e CRI-O. Will NOT work with builds or imagestream imports.' + format: hostname + items: + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + insecureRegistries: + description: insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections. + items: + type: string + type: array + type: object + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + properties: + externalRegistryHostnames: + description: externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in "hostname[:port]" format. + items: + type: string + type: array + internalRegistryHostname: + description: internalRegistryHostname sets the hostname for the default internal image registry. The value must be in "hostname[:port]" format. This value is set by the image registry operator which controls the internal registry hostname. 
+ type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/imagetagmirrorsets.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/imagetagmirrorsets.yaml new file mode 100644 index 000000000..fc4600859 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/imagetagmirrorsets.yaml @@ -0,0 +1,74 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1126 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: imagetagmirrorsets.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ImageTagMirrorSet + listKind: ImageTagMirrorSetList + plural: imagetagmirrorsets + shortNames: + - itms + singular: imagetagmirrorset + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + imageTagMirrors: + description: "imageTagMirrors allows images referenced by image tags in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageTagMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using digest specification only, users should configure a list of mirrors using \"ImageDigestMirrorSet\" CRD. \n If the image pull specification matches the repository of \"source\" in multiple imagetagmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the \"source\", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. Each “source” repository is treated independently; configurations for different “source” repositories don’t interact. \n If the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec. \n When multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. 
Users who want to use a deterministic order of mirrors, should configure them into one list of mirrors using the expected order." + items: + description: ImageTagMirrors holds cluster-wide information about how to handle mirrors in the registries config. + properties: + mirrorSourcePolicy: + description: mirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors. If unset, the image will continue to be pulled from the repository in the pull spec. sourcePolicy is valid configuration only when one or more mirrors are in the mirror list. + enum: + - NeverContactSource + - AllowContactingSource + type: string + mirrors: + description: 'mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their tags. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Configuring a list of mirrors using "ImageDigestMirrorSet" CRD and forcing digest-pulls for mirrors avoids that issue. The order of mirrors in this list is treated as the user''s desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy". 
Other cluster configuration, including (but not limited to) other imageTagMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. "mirrors" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' + items: + pattern: ^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ + type: string + type: array + x-kubernetes-list-type: set + source: + description: 'source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname e.g. docker.io. quay.io, or registry.redhat.io, will match the image pull specification of corressponding registry. "source" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table' + pattern: ^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$ + type: string + required: + - source + type: object + type: array + x-kubernetes-list-type: atomic + type: object + status: + description: status contains the observed state of the resource. 
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/infrastructures.args b/crd-catalog/openshift/api/config.openshift.io/v1/infrastructures.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/infrastructures.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/infrastructures.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/infrastructures.yaml new file mode 100644 index 000000000..6320d3d28 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/infrastructures.yaml @@ -0,0 +1,1113 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/feature-set: Default + name: infrastructures.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Infrastructure + listKind: InfrastructureList + plural: infrastructures + singular: infrastructure + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + cloudConfig: + description: "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config. \n cloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config. The controller generates a ConfigMap `kube-cloud-config` in `openshift-config-managed` namespace with the kube cloud config is stored in `cloud.conf` key. All the clients are expected to use the generated ConfigMap only." + properties: + key: + description: Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. + type: string + name: + type: string + type: object + platformSpec: + description: platformSpec holds desired information specific to the underlying infrastructure provider. + properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + type: object + aws: + description: AWS contains settings specific to the Amazon Web Services infrastructure provider. 
+ properties: + serviceEndpoints: + description: serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. + items: + description: AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services. + properties: + name: + description: name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + pattern: ^https:// + type: string + type: object + type: array + type: object + azure: + description: Azure contains settings specific to the Azure infrastructure provider. + type: object + baremetal: + description: BareMetal contains settings specific to the BareMetal platform. + properties: + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
+ pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address + rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) : true' + ingressIPs: + description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
+ pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address + rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) : true' + machineNetworks: + description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". + items: + description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
+ pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + type: string + maxItems: 32 + type: array + x-kubernetes-list-type: set + type: object + x-kubernetes-validations: + - message: apiServerInternalIPs list is required once set + rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' + - message: ingressIPs list is required once set + rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + type: object + external: + description: ExternalPlatformType represents generic infrastructure provider. Platform-specific components should be supplemented separately. 
+ properties: + platformName: + default: Unknown + description: PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making. + type: string + x-kubernetes-validations: + - message: platform name cannot be changed once set + rule: oldSelf == 'Unknown' || self == oldSelf + type: object + gcp: + description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. + type: object + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud infrastructure provider. + type: object + kubevirt: + description: Kubevirt contains settings specific to the kubevirt infrastructure provider. + type: object + nutanix: + description: Nutanix contains settings specific to the Nutanix infrastructure provider. + properties: + failureDomains: + description: failureDomains configures failure domains information for the Nutanix platform. When set, the failure domains defined here may be used to spread Machines across prism element clusters to improve fault tolerance of the cluster. + items: + description: NutanixFailureDomain configures failure domain information for the Nutanix platform. + properties: + cluster: + description: cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API. + properties: + name: + description: name is the resource name in the PC. It cannot be empty if the type is Name. + type: string + type: + description: type is the identifier type to use for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. 
+ type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required when type is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) : !has(self.uuid)' + - message: name configuration is required when type is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) : !has(self.name)' + name: + description: name defines the unique name of a failure domain. Name is required and must be at most 64 characters in length. It must consist of only lower case alphanumeric characters and hyphens (-). It must start and end with an alphanumeric character. This value is arbitrary and is used to identify the failure domain within the platform. + maxLength: 64 + minLength: 1 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + subnets: + description: subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API. + items: + description: NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.) + properties: + name: + description: name is the resource name in the PC. It cannot be empty if the type is Name. + type: string + type: + description: type is the identifier type to use for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required when type is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) : !has(self.uuid)' + - message: name configuration is required when type is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' ? 
has(self.name) : !has(self.name)' + maxItems: 1 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + required: + - cluster + - name + - subnets + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + prismCentral: + description: prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. + properties: + address: + description: address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) + maxLength: 256 + type: string + port: + description: port is the port number to access the Nutanix Prism Central or Element (cluster) + format: int32 + maximum: 65535.0 + minimum: 1.0 + type: integer + required: + - address + - port + type: object + prismElements: + description: prismElements holds one or more endpoint address and port data to access the Nutanix Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) spread over multiple Prism Elements (clusters) of the Prism Central. + items: + description: NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster) + properties: + endpoint: + description: endpoint holds the endpoint address and port data of the Prism Element (cluster). When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. 
+ properties: + address: + description: address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) + maxLength: 256 + type: string + port: + description: port is the port number to access the Nutanix Prism Central or Element (cluster) + format: int32 + maximum: 65535.0 + minimum: 1.0 + type: integer + required: + - address + - port + type: object + name: + description: name is the name of the Prism Element (cluster). This value will correspond with the cluster field configured on other resources (eg Machines, PVCs, etc). + maxLength: 256 + type: string + required: + - endpoint + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - prismCentral + - prismElements + type: object + openstack: + description: OpenStack contains settings specific to the OpenStack infrastructure provider. + properties: + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
+ pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address + rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) : true' + ingressIPs: + description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
+ pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address + rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) : true' + machineNetworks: + description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". + items: + description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
+ pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + type: string + maxItems: 32 + type: array + x-kubernetes-list-type: set + type: object + x-kubernetes-validations: + - message: apiServerInternalIPs list is required once set + rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' + - message: ingressIPs list is required once set + rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure provider. + type: object + powervs: + description: PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. + properties: + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service. 
+ items: + description: PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services. + properties: + name: + description: name is the name of the Power VS service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + format: uri + pattern: ^https:// + type: string + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: + description: type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. + enum: + - '' + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + vsphere: + description: VSphere contains settings specific to the VSphere infrastructure provider. 
+ properties: + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). + pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: apiServerInternalIPs must 
contain at most one IPv4 address and at most one IPv6 address + rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) : true' + failureDomains: + description: failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used. + items: + description: VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain. + properties: + name: + description: name defines the arbitrary but unique name of a failure domain. + maxLength: 256 + minLength: 1 + type: string + region: + description: region defines the name of a region tag that will be attached to a vCenter datacenter. The tag category in vCenter must be named openshift-region. + maxLength: 80 + minLength: 1 + type: string + server: + anyOf: + - format: ipv4 + - format: ipv6 + - format: hostname + description: server is the fully-qualified domain name or the IP address of the vCenter server. --- + maxLength: 255 + minLength: 1 + type: string + topology: + description: Topology describes a given failure domain using vSphere constructs + properties: + computeCluster: + description: computeCluster the absolute path of the vCenter cluster in which virtual machine will be located. The absolute path is of the form //host/. The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/host/.*? + type: string + datacenter: + description: datacenter is the name of vCenter datacenter in which virtual machines will be located. The maximum length of the datacenter name is 80 characters. + maxLength: 80 + type: string + datastore: + description: datastore is the absolute path of the datastore in which the virtual machine is located. The absolute path is of the form //datastore/ The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/datastore/.*? 
+ type: string + folder: + description: folder is the absolute path of the folder where virtual machines are located. The absolute path is of the form //vm/. The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/vm/.*? + type: string + networks: + description: networks is the list of port group network names within this failure domain. Currently, we only support a single interface per RHCOS virtual machine. The available networks (port groups) can be listed using `govc ls 'network/*'` The single interface should be the absolute path of the form //network/. + items: + type: string + maxItems: 1 + minItems: 1 + type: array + resourcePool: + description: resourcePool is the absolute path of the resource pool where virtual machines will be created. The absolute path is of the form //host//Resources/. The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/host/.*?/Resources.* + type: string + required: + - computeCluster + - datacenter + - datastore + - networks + type: object + zone: + description: zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone. + maxLength: 80 + minLength: 1 + type: string + required: + - name + - region + - server + - topology + - zone + type: object + type: array + ingressIPs: + description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
+ pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address + rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) : true' + machineNetworks: + description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". + items: + description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
+ pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + type: string + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeNetworking: + description: nodeNetworking contains the definition of internal and external network constraints for assigning the node's networking. If this field is omitted, networking defaults to the legacy address selection behavior which is to only support a single address and return the first one found. + properties: + external: + description: external represents the network configuration of the node that is externally routable. + properties: + excludeNetworkSubnetCidr: + description: excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting the IP address from the VirtualMachine's VM for use in the status.addresses fields. 
--- + items: + format: cidr + type: string + type: array + network: + description: network VirtualMachine's VM Network names that will be used to when searching for status.addresses fields. Note that if internal.networkSubnetCIDR and external.networkSubnetCIDR are not set, then the vNIC associated to this network must only have a single IP address assigned to it. The available networks (port groups) can be listed using `govc ls 'network/*'` + type: string + networkSubnetCidr: + description: networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs that will be used in respective status.addresses fields. --- + items: + format: cidr + type: string + type: array + type: object + internal: + description: internal represents the network configuration of the node that is routable only within the cluster. + properties: + excludeNetworkSubnetCidr: + description: excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting the IP address from the VirtualMachine's VM for use in the status.addresses fields. --- + items: + format: cidr + type: string + type: array + network: + description: network VirtualMachine's VM Network names that will be used to when searching for status.addresses fields. Note that if internal.networkSubnetCIDR and external.networkSubnetCIDR are not set, then the vNIC associated to this network must only have a single IP address assigned to it. The available networks (port groups) can be listed using `govc ls 'network/*'` + type: string + networkSubnetCidr: + description: networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs that will be used in respective status.addresses fields. --- + items: + format: cidr + type: string + type: array + type: object + type: object + vcenters: + description: vcenters holds the connection details for services to communicate with vCenter. Currently, only a single vCenter is supported. 
--- + items: + description: VSpherePlatformVCenterSpec stores the vCenter connection fields. This is used by the vSphere CCM. + properties: + datacenters: + description: The vCenter Datacenters in which the RHCOS vm guests are located. This field will be used by the Cloud Controller Manager. Each datacenter listed here should be used within a topology. + items: + type: string + minItems: 1 + type: array + port: + description: port is the TCP port that will be used to communicate to the vCenter endpoint. When omitted, this means the user has no opinion and it is up to the platform to choose a sensible default, which is subject to change over time. + format: int32 + maximum: 32767.0 + minimum: 1.0 + type: integer + server: + anyOf: + - format: ipv4 + - format: ipv6 + - format: hostname + description: server is the fully-qualified domain name or the IP address of the vCenter server. --- + maxLength: 255 + type: string + required: + - datacenters + - server + type: object + maxItems: 1 + minItems: 0 + type: array + type: object + x-kubernetes-validations: + - message: apiServerInternalIPs list is required once set + rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' + - message: ingressIPs list is required once set + rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' + type: object + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + properties: + apiServerInternalURI: + description: apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking. + type: string + apiServerURL: + description: apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). 
apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API. + type: string + controlPlaneTopology: + default: HighlyAvailable + description: controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. + enum: + - HighlyAvailable + - SingleReplica + - External + type: string + cpuPartitioning: + default: None + description: cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. Valid values are "None" and "AllNodes". When omitted, the default value is "None". The default value of "None" indicates that no nodes will be setup with CPU partitioning. The "AllNodes" value indicates that all nodes have been setup with CPU partitioning, and can then be further configured via the PerformanceProfile API. + enum: + - None + - AllNodes + type: string + etcdDiscoveryDomain: + description: 'etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.' + type: string + infrastructureName: + description: infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. 
Must be of max length 27 and must have only alphanumeric or hyphen characters. + type: string + infrastructureTopology: + default: HighlyAvailable + description: 'infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is ''HighlyAvailable'', which represents the behavior operators have in a "normal" cluster. The ''SingleReplica'' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation NOTE: External topology mode is not applicable for this field.' + enum: + - HighlyAvailable + - SingleReplica + type: string + platform: + description: "platform is the underlying infrastructure provider for the cluster. \n Deprecated: Use platformStatus.type instead." + enum: + - '' + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + platformStatus: + description: platformStatus holds status information specific to the underlying infrastructure provider. + properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + properties: + region: + description: region specifies the region for Alibaba Cloud resources created for the cluster. + pattern: ^[0-9A-Za-z-]+$ + type: string + resourceGroupID: + description: resourceGroupID is the ID of the resource group for the cluster. + pattern: ^(rg-[0-9A-Za-z]+)?$ + type: string + resourceTags: + description: resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster. + items: + description: AlibabaCloudResourceTag is the set of tags to add to apply to resources. + properties: + key: + description: key is the key of the tag. 
+ maxLength: 128 + minLength: 1 + type: string + value: + description: value is the value of the tag. + maxLength: 128 + minLength: 1 + type: string + required: + - key + - value + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + required: + - region + type: object + aws: + description: AWS contains settings specific to the Amazon Web Services infrastructure provider. + properties: + region: + description: region holds the default AWS region for new AWS resources created by the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user. + items: + description: AWSResourceTag is a tag to apply to AWS resources created for the cluster. + properties: + key: + description: key is the key of the tag + maxLength: 128 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + type: string + value: + description: value is the value of the tag. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services. + maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 25 + type: array + serviceEndpoints: + description: ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. + items: + description: AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services. + properties: + name: + description: name is the name of the AWS service. 
The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + pattern: ^https:// + type: string + type: object + type: array + type: object + azure: + description: Azure contains settings specific to the Azure infrastructure provider. + properties: + armEndpoint: + description: armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack. + type: string + cloudName: + description: cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. + enum: + - '' + - AzurePublicCloud + - AzureUSGovernmentCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureStackCloud + type: string + networkResourceGroupName: + description: networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is same as ResourceGroupName. + type: string + resourceGroupName: + description: resourceGroupName is the Resource Group for new Azure resources created for the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration. + items: + description: AzureResourceTag is a tag to apply to Azure resources created for the cluster. 
+ properties: + key: + description: key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric characters and the following special characters `_ . -`. + maxLength: 128 + minLength: 1 + pattern: ^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$ + type: string + value: + description: 'value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`.' + maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 10 + type: array + x-kubernetes-validations: + - message: resourceTags are immutable and may only be configured during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) + type: object + x-kubernetes-validations: + - message: resourceTags may only be configured during installation + rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' + baremetal: + description: BareMetal contains settings specific to the BareMetal platform. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. \n Deprecated: Use APIServerInternalIPs instead." 
+ type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + ingressIP: + description: "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + machineNetworks: + description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. + items: + description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
+ pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + type: string + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + type: string + type: object + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. 
+ properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + type: object + external: + description: External contains settings specific to the generic External infrastructure provider. + properties: + cloudControllerManager: + description: cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). When omitted, new nodes will be not tainted and no extra initialization from the cloud controller manager is expected. + properties: + state: + description: "state determines whether or not an external Cloud Controller Manager is expected to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager \n Valid values are \"External\", \"None\" and omitted. When set to \"External\", new nodes will be tainted as uninitialized when created, preventing them from running workloads until they are initialized by the cloud controller manager. When omitted or set to \"None\", new nodes will be not tainted and no extra initialization from the cloud controller manager is expected." 
+ enum: + - '' + - External + - None + type: string + x-kubernetes-validations: + - message: state is immutable once set + rule: self == oldSelf + type: object + x-kubernetes-validations: + - message: state may not be added or removed once set + rule: (has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) && self.state != "External") + type: object + x-kubernetes-validations: + - message: cloudControllerManager may not be added or removed once set + rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) + gcp: + description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. + properties: + projectID: + description: projectID is the Project ID for new GCP resources created for the cluster. + type: string + region: + description: region holds the region for new GCP resources created for the cluster. + type: string + type: object + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud infrastructure provider. + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain + type: string + dnsInstanceCRN: + description: DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain + type: string + location: + description: Location is where the cluster has been deployed + type: string + providerType: + description: ProviderType indicates the type of cluster that was created + type: string + resourceGroupName: + description: ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. + type: string + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM Cloud service. These endpoints are consumed by components within the cluster to reach the respective IBM Cloud Services. 
+ items: + description: IBMCloudServiceEndpoint stores the configuration of a custom url to override existing defaults of IBM Cloud Services. + properties: + name: + description: 'name is the name of the IBM Cloud service. Possible values are: CIS, COS, DNSServices, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`' + enum: + - CIS + - COS + - DNSServices + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - ResourceController + - ResourceManager + - VPC + type: string + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + type: string + x-kubernetes-validations: + - message: url must be a valid absolute URL + rule: isURL(self) + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + kubevirt: + description: Kubevirt contains settings specific to the kubevirt infrastructure provider. + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. 
The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + type: object + nutanix: + description: Nutanix contains settings specific to the Nutanix infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + ingressIP: + description: "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + type: object + openstack: + description: OpenStack contains settings specific to the OpenStack infrastructure provider. 
+ properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + cloudName: + description: cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`). + type: string + ingressIP: + description: "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used by the cluster is configured. 
+ properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used by the cluster on OpenStack platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + machineNetworks: + description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. + items: + description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
+ pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + type: string + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + type: string + type: object + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure provider. 
+ properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + ingressIP: + description: "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + nodeDNSIP: + description: 'deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.' + type: string + type: object + powervs: + description: PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider. 
+ properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain + type: string + dnsInstanceCRN: + description: DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain + type: string + region: + description: region holds the default Power VS region for new Power VS resources created by the cluster. + type: string + resourceGroup: + description: 'resourceGroup is the resource group name for new IBMCloud resources created for a cluster. The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. When omitted, the image registry operator won''t be able to configure storage, which results in the image registry cluster operator not being in an available state.' + maxLength: 40 + pattern: ^[a-zA-Z0-9-_ ]+$ + type: string + x-kubernetes-validations: + - message: resourceGroup is immutable once set + rule: oldSelf == '' || self == oldSelf + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service. + items: + description: PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services. + properties: + name: + description: name is the name of the Power VS service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. 
+ format: uri + pattern: ^https:// + type: string + required: + - name + - url + type: object + type: array + zone: + description: 'zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported' + type: string + type: object + x-kubernetes-validations: + - message: cannot unset resourceGroup once set + rule: '!has(oldSelf.resourceGroup) || has(self.resourceGroup)' + type: + description: "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. \n This value will be synced with to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set." + enum: + - '' + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + vsphere: + description: VSphere contains settings specific to the VSphere infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. 
\n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + ingressIP: + description: "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + machineNetworks: + description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. + items: + description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
+ pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + type: string + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. 
+ type: string + type: object + type: object + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/ingresses.args b/crd-catalog/openshift/api/config.openshift.io/v1/ingresses.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/ingresses.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/ingresses.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/ingresses.yaml new file mode 100644 index 000000000..1c4a7350f --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/ingresses.yaml @@ -0,0 +1,334 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: ingresses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Ingress + listKind: IngressList + plural: ingresses + singular: ingress + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + appsDomain: + description: appsDomain is an optional domain to use instead of the one specified in the domain field when a Route is created without specifying an explicit host. If appsDomain is nonempty, this value is used to generate default host values for Route. Unlike domain, appsDomain may be modified after installation. This assumes a new ingresscontroller has been setup with a wildcard certificate. + type: string + componentRoutes: + description: "componentRoutes is an optional list of routes that are managed by OpenShift components that a cluster-admin is able to configure the hostname and serving certificate for. The namespace and name of each route in this list should match an existing entry in the status.componentRoutes list. \n To determine the set of configurable Routes, look at namespace and name of entries in the .status.componentRoutes list, where participating operators write the status of configurable routes." + items: + description: ComponentRouteSpec allows for configuration of a route's hostname and serving certificate. + properties: + hostname: + description: hostname is the hostname that should be used by the route. 
+ pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ + type: string + name: + description: "name is the logical name of the route to customize. \n The namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized." + maxLength: 256 + minLength: 1 + type: string + namespace: + description: "namespace is the namespace of the route to customize. \n The namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized." + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + servingCertKeyPairSecret: + description: servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + required: + - hostname + - name + - namespace + type: object + type: array + x-kubernetes-list-map-keys: + - namespace + - name + x-kubernetes-list-type: map + domain: + description: "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"..\". \n It is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.\". 
\n Once set, changing domain is not currently supported." + type: string + loadBalancer: + description: loadBalancer contains the load balancer details in general which are not only specific to the underlying infrastructure provider of the current cluster and are required for Ingress Controller to work on OpenShift. + properties: + platform: + description: platform holds configuration specific to the underlying infrastructure provider for the ingress load balancers. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. + properties: + aws: + description: aws contains settings specific to the Amazon Web Services infrastructure provider. + properties: + type: + description: "type allows user to set a load balancer type. When this field is set the default ingresscontroller will get created using the specified LBType. If this field is not set then the default ingress controller of LBType Classic will be created. Valid values are: \n * \"Classic\": A Classic Load Balancer that makes routing decisions at either the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb \n * \"NLB\": A Network Load Balancer that makes routing decisions at the transport layer (TCP/SSL). See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb" + enum: + - NLB + - Classic + type: string + required: + - type + type: object + type: + description: type is the underlying infrastructure provider for the cluster. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and "None". 
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. + enum: + - '' + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + type: object + type: object + requiredHSTSPolicies: + description: "requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes matching the domainPattern/s and namespaceSelector/s that are specified in the policy. Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route annotation, and affect route admission. \n A candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\" E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains \n - For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector, then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route is rejected. - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies determines the route's admission status. - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector, then it may use any HSTS Policy annotation. \n The HSTS policy configuration may be changed after routes have already been created. An update to a previously admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration. However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working. \n Note that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid." 
+ items: + properties: + domainPatterns: + description: "domainPatterns is a list of domains for which the desired HSTS annotations are required. If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy. \n The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*." + items: + type: string + minItems: 1 + type: array + includeSubDomainsPolicy: + description: 'includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host''s domain name. Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com' + enum: + - RequireIncludeSubDomains + - RequireNoIncludeSubDomains + - NoOpinion + type: string + maxAge: + description: maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. If set to 0, it negates the effect, and hosts are removed as HSTS hosts. If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS policy will eventually expire on that client. + properties: + largestMaxAge: + description: The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age This value can be left unspecified, in which case no upper limit is enforced. 
+ format: int32 + maximum: 2147483647.0 + minimum: 0.0 + type: integer + smallestMaxAge: + description: The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary tool for administrators to quickly correct mistakes. This value can be left unspecified, in which case no lower limit is enforced. + format: int32 + maximum: 2147483647.0 + minimum: 0.0 + type: integer + type: object + namespaceSelector: + description: namespaceSelector specifies a label selector such that the policy applies only to those routes that are in namespaces with labels that match the selector, and are in one of the DomainPatterns. Defaults to the empty LabelSelector, which matches everything. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + preloadPolicy: + description: preloadPolicy directs the client to include hosts in its host preload list so that it never needs to do an initial load to get the HSTS header (note that this is not defined in RFC 6797 and is therefore client implementation-dependent). + enum: + - RequirePreload + - RequireNoPreload + - NoOpinion + type: string + required: + - domainPatterns + type: object + type: array + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + properties: + componentRoutes: + description: componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin. + items: + description: ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate. + properties: + conditions: + description: "conditions are used to communicate the state of the componentRoutes entry. \n Supported conditions include Available, Degraded and Progressing. \n If available is true, the content served by the route can be accessed by users. This includes cases where a default may continue to serve content while the customized route specified by the cluster-admin is being configured. \n If Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. The currentHostnames field may or may not be in effect. \n If Progressing is true, that means the component is taking some action related to the componentRoutes entry." + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + consumingUsers: + description: consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret. + items: + description: ConsumingUser is an alias for string which we add validation to. Currently only service accounts are supported. + maxLength: 512 + minLength: 1 + pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + maxItems: 5 + type: array + currentHostnames: + description: currentHostnames is the list of current names used by the route. Typically, this list should consist of a single hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list. + items: + description: "Hostname is an alias for hostname string validation. \n The left operand of the | is the original kubebuilder hostname validation format, which is incorrect because it allows upper case letters, disallows hyphen or number in the TLD, and allows labels to start/end in non-alphanumeric characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256. 
^([a-zA-Z0-9\\p{S}\\p{L}]((-?[a-zA-Z0-9\\p{S}\\p{L}]{0,62})?)|([a-zA-Z0-9\\p{S}\\p{L}](([a-zA-Z0-9-\\p{S}\\p{L}]{0,61}[a-zA-Z0-9\\p{S}\\p{L}])?)(\\.)){1,}([a-zA-Z\\p{L}]){2,63})$ \n The right operand of the | is a new pattern that mimics the current API route admission validation on hostname, except that it allows hostnames longer than the maximum length: ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ \n Both operand patterns are made available so that modifications on ingress spec can still happen after an invalid hostname was saved via validation by the incorrect left operand of the | operator." + pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ + type: string + minItems: 1 + type: array + defaultHostname: + description: defaultHostname is the hostname of this route prior to customization. + pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ + type: string + name: + description: "name is the logical name of the route to customize. It does not have to be the actual name of a route resource but it cannot be renamed. \n The namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized." + maxLength: 256 + minLength: 1 + type: string + namespace: + description: "namespace is the namespace of the route to customize. It must be a real namespace. 
Using an actual namespace ensures that no two components will conflict and the same component can be installed multiple times. \n The namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized." + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + relatedObjects: + description: relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied. + items: + description: ObjectReference contains enough information to let you inspect or modify the referred object. + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. + type: string + required: + - group + - name + - resource + type: object + minItems: 1 + type: array + required: + - defaultHostname + - name + - namespace + - relatedObjects + type: object + type: array + x-kubernetes-list-map-keys: + - namespace + - name + x-kubernetes-list-type: map + defaultPlacement: + description: "defaultPlacement is set at installation time to control which nodes will host the ingress router pods by default. The options are control-plane nodes or worker nodes. \n This field works by dictating how the Cluster Ingress Operator will consider unset replicas and nodePlacement fields in IngressController resources when creating the corresponding Deployments. \n See the documentation for the IngressController replicas and nodePlacement fields for more information. 
\n When omitted, the default value is Workers" + enum: + - ControlPlane + - Workers + - '' + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/networks.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/networks.yaml new file mode 100644 index 000000000..159719285 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/networks.yaml @@ -0,0 +1,163 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: networks.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. This field is immutable after installation. + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated. + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset. + format: int32 + minimum: 0.0 + type: integer + type: object + type: array + externalIP: + description: externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set. + properties: + autoAssignCIDRs: + description: autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called "IngressIPs". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided. 
+ items: + type: string + type: array + policy: + description: policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set. + properties: + allowedCIDRs: + description: allowedCIDRs is the list of allowed CIDRs. + items: + type: string + type: array + rejectedCIDRs: + description: rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs. + items: + type: string + type: array + type: object + type: object + networkType: + description: 'NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.' + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation. + items: + type: string + type: array + serviceNodePortRange: + description: The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed. + pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: string + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated. + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each node. 
If this field is not used by the plugin, it can be left unset. + format: int32 + minimum: 0.0 + type: integer + type: object + type: array + clusterNetworkMTU: + description: ClusterNetworkMTU is the MTU for inter-pod networking. + type: integer + migration: + description: Migration contains the cluster network migration configuration. + properties: + mtu: + description: MTU contains the MTU migration configuration. + properties: + machine: + description: Machine contains MTU migration configuration for the machine's uplink. + properties: + from: + description: From is the MTU to migrate from. + format: int32 + minimum: 0.0 + type: integer + to: + description: To is the MTU to migrate to. + format: int32 + minimum: 0.0 + type: integer + type: object + network: + description: Network contains MTU migration configuration for the default network. + properties: + from: + description: From is the MTU to migrate from. + format: int32 + minimum: 0.0 + type: integer + to: + description: To is the MTU to migrate to. + format: int32 + minimum: 0.0 + type: integer + type: object + type: object + networkType: + description: 'NetworkType is the target plugin that is to be deployed. Currently supported values are: OpenShiftSDN, OVNKubernetes' + enum: + - OpenShiftSDN + - OVNKubernetes + type: string + type: object + networkType: + description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support a single entry here. 
+ items: + type: string + type: array + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/nodes.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/nodes.yaml new file mode 100644 index 000000000..77de3cf0f --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/nodes.yaml @@ -0,0 +1,59 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1107 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: nodes.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Node + listKind: NodeList + plural: nodes + singular: node + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Node holds cluster-wide information about node specific features. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + cgroupMode: + description: CgroupMode determines the cgroups version on the node + enum: + - v1 + - v2 + - '' + type: string + workerLatencyProfile: + description: WorkerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster + enum: + - Default + - MediumUpdateAverageReaction + - LowUpdateSlowReaction + type: string + type: object + status: + description: status holds observed values. + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/oauths.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/oauths.yaml new file mode 100644 index 000000000..7c3943b95 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/oauths.yaml @@ -0,0 +1,444 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: oauths.config.openshift.io +spec: + group: config.openshift.io + names: + kind: OAuth + listKind: OAuthList + plural: oauths + singular: oauth + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth. 
\n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + identityProviders: + description: identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users. + items: + description: IdentityProvider provides identities for users authenticating using credentials + properties: + basicAuth: + description: basicAuth contains configuration options for the BasicAuth IdP + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. 
+ properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + tlsClientCert: + description: tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key "tls.crt" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + tlsClientKey: + description: tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key "tls.key" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + url: + description: url is the remote URL to connect to + type: string + type: object + github: + description: github enables user authentication using GitHub credentials + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. 
If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + hostname: + description: hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname. + type: string + organizations: + description: organizations optionally restricts which organizations are allowed to log in + items: + type: string + type: array + teams: + description: teams optionally restricts which teams are allowed to log in. Format is /. + items: + type: string + type: array + type: object + gitlab: + description: gitlab enables user authentication using GitLab credentials + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. 
The namespace for this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + url: + description: url is the oauth server base URL + type: string + type: object + google: + description: google enables user authentication using Google credentials + properties: + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + hostedDomain: + description: hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + type: string + type: object + htpasswd: + description: htpasswd enables user authentication using an HTPasswd file to validate credentials + properties: + fileData: + description: fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key "htpasswd" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. 
If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + type: object + keystone: + description: keystone enables user authentication using keystone password credentials + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + domainName: + description: domainName is required for keystone v3 + type: string + tlsClientCert: + description: tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key "tls.crt" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + tlsClientKey: + description: tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key "tls.key" is used to locate the data. 
If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + url: + description: url is the remote URL to connect to + type: string + type: object + ldap: + description: ldap enables user authentication using LDAP credentials + properties: + attributes: + description: attributes maps LDAP attributes to identities + properties: + email: + description: email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity + items: + type: string + type: array + id: + description: id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attribute have a value, authentication fails. LDAP standard identity attribute is "dn" + items: + type: string + type: array + name: + description: name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is "cn" + items: + type: string + type: array + preferredUsername: + description: preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is "uid" + items: + type: string + type: array + type: object + bindDN: + description: bindDN is an optional DN to bind with during the search phase. + type: string + bindPassword: + description: bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key "bindPassword" is used to locate the data. 
If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + insecure: + description: 'insecure, if true, indicates the connection should not use TLS WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always attempt to connect using TLS, even when `insecure` is set to `true` When `true`, "ldap://" URLS connect insecurely. When `false`, "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.' + type: boolean + url: + description: 'url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter' + type: string + type: object + mappingMethod: + description: mappingMethod determines how identities from this provider are mapped to users Defaults to "claim" + type: string + name: + description: 'name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal "." or ".." 
or contain "/" or "%" or ":" Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName' + type: string + openID: + description: openID enables user authentication using OpenID credentials + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + claims: + description: claims mappings + properties: + email: + description: email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity + items: + type: string + type: array + x-kubernetes-list-type: atomic + groups: + description: groups is the list of claims value of which should be used to synchronize groups from the OIDC provider to OpenShift for the user. If multiple claims are specified, the first one with a non-empty value is used. + items: + description: OpenIDClaim represents a claim retrieved from an OpenID provider's tokens or userInfo responses + minLength: 1 + type: string + type: array + x-kubernetes-list-type: atomic + name: + description: name is the list of claims whose values should be used as the display name. Optional. 
If unspecified, no display name is set for the identity + items: + type: string + type: array + x-kubernetes-list-type: atomic + preferredUsername: + description: preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the sub claim + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + extraAuthorizeParameters: + additionalProperties: + type: string + description: extraAuthorizeParameters are any custom parameters to add to the authorize request. + type: object + extraScopes: + description: extraScopes are any scopes to request in addition to the standard "openid" scope. + items: + type: string + type: array + issuer: + description: issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component. + type: string + type: object + requestHeader: + description: requestHeader enables user authentication using request header credentials + properties: + ca: + description: ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key "ca.crt" is used to locate the data. 
If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + challengeURL: + description: challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} Required when challenge is set to true. + type: string + clientCommonNames: + description: clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative. + items: + type: string + type: array + emailHeaders: + description: emailHeaders is the set of headers to check for the email address + items: + type: string + type: array + headers: + description: headers is the set of headers to check for identity information + items: + type: string + type: array + loginURL: + description: loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} Required when login is set to true. 
+ type: string + nameHeaders: + description: nameHeaders is the set of headers to check for the display name + items: + type: string + type: array + preferredUsernameHeaders: + description: preferredUsernameHeaders is the set of headers to check for the preferred username + items: + type: string + type: array + type: object + type: + description: type identifies the identity provider type for this entry. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + templates: + description: templates allow you to customize pages like the login page. + properties: + error: + description: error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key "errors.html" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + login: + description: login is the name of a secret that specifies a go template to use to render the login page. The key "login.html" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + providerSelection: + description: providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key "providers.html" is used to locate the template data. 
If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + type: object + tokenConfig: + description: tokenConfig contains options for authorization and access tokens + properties: + accessTokenInactivityTimeout: + description: "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime. \n WARNING: existing tokens' timeout will not be affected (lowered) by changing this value" + type: string + accessTokenInactivityTimeoutSeconds: + description: 'accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.' + format: int32 + type: integer + accessTokenMaxAgeSeconds: + description: accessTokenMaxAgeSeconds defines the maximum age of access tokens + format: int32 + type: integer + type: object + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. 
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/operatorhubs.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/operatorhubs.yaml new file mode 100644 index 000000000..9f4c0a6d7 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/operatorhubs.yaml @@ -0,0 +1,84 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + capability.openshift.io/name: marketplace + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: operatorhubs.config.openshift.io +spec: + group: config.openshift.io + names: + kind: OperatorHub + listKind: OperatorHubList + plural: operatorhubs + singular: operatorhub + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OperatorHubSpec defines the desired state of OperatorHub + properties: + disableAllDefaultSources: + description: disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source. + type: boolean + sources: + description: sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block. + items: + description: HubSource is used to specify the hub source and its configuration + properties: + disabled: + description: disabled is used to disable a default hub source on cluster + type: boolean + name: + description: name is the name of one of the default hub sources + maxLength: 253 + minLength: 1 + type: string + type: object + type: array + type: object + status: + description: OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here. 
+ properties: + sources: + description: sources encapsulates the result of applying the configuration for each hub source + items: + description: HubSourceStatus is used to reflect the current state of applying the configuration to a default source + properties: + disabled: + description: disabled is used to disable a default hub source on cluster + type: boolean + message: + description: message provides more information regarding failures + type: string + name: + description: name is the name of one of the default hub sources + maxLength: 253 + minLength: 1 + type: string + status: + description: status indicates success or failure in applying the configuration + type: string + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/projects.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/projects.yaml new file mode 100644 index 000000000..9fc07dff3 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/projects.yaml @@ -0,0 +1,55 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: projects.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Project + listKind: ProjectList + plural: projects + singular: project + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Project holds cluster-wide information about Project. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + projectRequestMessage: + description: projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint + type: string + projectRequestTemplate: + description: projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used. + properties: + name: + description: name is the metadata.name of the referenced project request template + type: string + type: object + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. 
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/proxies.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/proxies.yaml new file mode 100644 index 000000000..acaeae852 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/proxies.yaml @@ -0,0 +1,78 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: proxies.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Proxy + listKind: ProxyList + plural: proxies + singular: proxy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec holds user-settable values for the proxy configuration + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + items: + type: string + type: array + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well. \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate bundle. 
-----END CERTIFICATE-----" + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/schedulers.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/schedulers.yaml new file mode 100644 index 000000000..a4fd5ef97 --- /dev/null +++ b/crd-catalog/openshift/api/config.openshift.io/v1/schedulers.yaml @@ -0,0 +1,68 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: schedulers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Scheduler + listKind: SchedulerList + plural: schedulers + singular: scheduler + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + defaultNodeSelector: + description: 'defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod''s selector. For example, defaultNodeSelector: "type=user-node,region=east" would set nodeSelector field in pod spec to "type=user-node,region=east" to all pods created in all namespaces. Namespaces having project-wide node selectors won''t be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector=''type=user-node,region=east'', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: "type=user-node,region=west" means that the default of "type=user-node,region=east" set in defaultNodeSelector would not be applied.' 
+ type: string + mastersSchedulable: + description: 'MastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.' + type: boolean + policy: + description: 'DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.' + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + profile: + description: "profile sets which scheduling profile should be set in order to configure scheduling decisions for new pods. \n Valid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\"" + enum: + - '' + - LowNodeUtilization + - HighNodeUtilization + - NoScoring + type: string + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. 
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/console.openshift.io/v1/consoleclidownloads.ignore b/crd-catalog/openshift/api/console.openshift.io/v1/consoleclidownloads.ignore new file mode 100644 index 000000000..5feeef378 --- /dev/null +++ b/crd-catalog/openshift/api/console.openshift.io/v1/consoleclidownloads.ignore @@ -0,0 +1 @@ +cannot find type `ConsoleCLIDownloadStatus` in this scope diff --git a/crd-catalog/openshift/api/console.openshift.io/v1/consoleclidownloads.yaml b/crd-catalog/openshift/api/console.openshift.io/v1/consoleclidownloads.yaml new file mode 100644 index 000000000..fd9baf91c --- /dev/null +++ b/crd-catalog/openshift/api/console.openshift.io/v1/consoleclidownloads.yaml @@ -0,0 +1,77 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 + capability.openshift.io/name: Console + description: Extension for configuring openshift web console command line interface (CLI) downloads. + displayName: ConsoleCLIDownload + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: consoleclidownloads.console.openshift.io +spec: + group: console.openshift.io + names: + kind: ConsoleCLIDownload + listKind: ConsoleCLIDownloadList + plural: consoleclidownloads + singular: consoleclidownload + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.displayName + name: Display name + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: string + name: v1 + schema: + openAPIV3Schema: + description: "ConsoleCLIDownload is an extension for configuring openshift web console command line interface (CLI) downloads. 
\n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleCLIDownloadSpec is the desired cli download configuration. + properties: + description: + description: description is the description of the CLI download (can include markdown). + type: string + displayName: + description: displayName is the display name of the CLI download. + type: string + links: + description: links is a list of objects that provide CLI download link details. 
+ items: + properties: + href: + description: href is the absolute secure URL for the link (must use https) + pattern: ^https:// + type: string + text: + description: text is the display text for the link + type: string + required: + - href + type: object + type: array + required: + - description + - displayName + - links + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/console.openshift.io/v1/consoleexternalloglinks.ignore b/crd-catalog/openshift/api/console.openshift.io/v1/consoleexternalloglinks.ignore new file mode 100644 index 000000000..f18776c6c --- /dev/null +++ b/crd-catalog/openshift/api/console.openshift.io/v1/consoleexternalloglinks.ignore @@ -0,0 +1 @@ +cannot find type `ConsoleExternalLogLinkStatus` in this scope diff --git a/crd-catalog/openshift/api/console.openshift.io/v1/consoleexternalloglinks.yaml b/crd-catalog/openshift/api/console.openshift.io/v1/consoleexternalloglinks.yaml new file mode 100644 index 000000000..3033a8574 --- /dev/null +++ b/crd-catalog/openshift/api/console.openshift.io/v1/consoleexternalloglinks.yaml @@ -0,0 +1,68 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 + capability.openshift.io/name: Console + description: ConsoleExternalLogLink is an extension for customizing OpenShift web console log links. 
+ displayName: ConsoleExternalLogLinks + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: consoleexternalloglinks.console.openshift.io +spec: + group: console.openshift.io + names: + kind: ConsoleExternalLogLink + listKind: ConsoleExternalLogLinkList + plural: consoleexternalloglinks + singular: consoleexternalloglink + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.text + name: Text + type: string + - jsonPath: .spec.hrefTemplate + name: HrefTemplate + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: "ConsoleExternalLogLink is an extension for customizing OpenShift web console log links. \n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleExternalLogLinkSpec is the desired log link configuration. The log link will appear on the logs tab of the pod details page. 
+ properties: + hrefTemplate: + description: "hrefTemplate is an absolute secure URL (must use https) for the log link including variables to be replaced. Variables are specified in the URL with the format ${variableName}, for instance, ${containerName} and will be replaced with the corresponding values from the resource. Resource is a pod. Supported variables are: - ${resourceName} - name of the resource which containes the logs - ${resourceUID} - UID of the resource which contains the logs - e.g. `11111111-2222-3333-4444-555555555555` - ${containerName} - name of the resource's container that contains the logs - ${resourceNamespace} - namespace of the resource that contains the logs - ${resourceNamespaceUID} - namespace UID of the resource that contains the logs - ${podLabels} - JSON representation of labels matching the pod with the logs - e.g. `{\"key1\":\"value1\",\"key2\":\"value2\"}` \n e.g., https://example.com/logs?resourceName=${resourceName}&containerName=${containerName}&resourceNamespace=${resourceNamespace}&podLabels=${podLabels}" + pattern: ^https:// + type: string + namespaceFilter: + description: namespaceFilter is a regular expression used to restrict a log link to a matching set of namespaces (e.g., `^openshift-`). The string is converted into a regular expression using the JavaScript RegExp constructor. If not specified, links will be displayed for all the namespaces. 
+ type: string + text: + description: text is the display text for the link + type: string + required: + - hrefTemplate + - text + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/console.openshift.io/v1/consolelinks.ignore b/crd-catalog/openshift/api/console.openshift.io/v1/consolelinks.ignore new file mode 100644 index 000000000..0496b5db2 --- /dev/null +++ b/crd-catalog/openshift/api/console.openshift.io/v1/consolelinks.ignore @@ -0,0 +1 @@ +cannot find type `ConsoleLinkStatus` in this scope diff --git a/crd-catalog/openshift/api/console.openshift.io/v1/consolelinks.yaml b/crd-catalog/openshift/api/console.openshift.io/v1/consolelinks.yaml new file mode 100644 index 000000000..4d71abd53 --- /dev/null +++ b/crd-catalog/openshift/api/console.openshift.io/v1/consolelinks.yaml @@ -0,0 +1,125 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 + capability.openshift.io/name: Console + description: Extension for customizing OpenShift web console links + displayName: ConsoleLinks + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: consolelinks.console.openshift.io +spec: + group: console.openshift.io + names: + kind: ConsoleLink + listKind: ConsoleLinkList + plural: consolelinks + singular: consolelink + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.text + name: Text + type: string + - jsonPath: .spec.href + name: URL + type: string + - jsonPath: .spec.menu + name: Menu + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: "ConsoleLink is an extension for customizing OpenShift web console links. 
\n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleLinkSpec is the desired console link configuration. + properties: + applicationMenu: + description: applicationMenu holds information about section and icon used for the link in the application menu, and it is applicable only when location is set to ApplicationMenu. + properties: + imageURL: + description: imageUrl is the URL for the icon used in front of the link in the application menu. The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels. + type: string + section: + description: section is the section of the application menu in which the link should appear. This can be any text that will appear as a subheading in the application menu dropdown. A new section will be created if the text does not match text of an existing section. + type: string + required: + - section + type: object + href: + description: href is the absolute secure URL for the link (must use https) + pattern: ^https:// + type: string + location: + description: location determines which location in the console the link will be appended to (ApplicationMenu, HelpMenu, UserMenu, NamespaceDashboard). 
+ pattern: ^(ApplicationMenu|HelpMenu|UserMenu|NamespaceDashboard)$ + type: string + namespaceDashboard: + description: namespaceDashboard holds information about namespaces in which the dashboard link should appear, and it is applicable only when location is set to NamespaceDashboard. If not specified, the link will appear in all namespaces. + properties: + namespaceSelector: + description: namespaceSelector is used to select the Namespaces that should contain dashboard link by label. If the namespace labels match, dashboard link will be shown for the namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces is an array of namespace names in which the dashboard link should appear. 
+ items: + type: string + type: array + type: object + text: + description: text is the display text for the link + type: string + required: + - href + - location + - text + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/console.openshift.io/v1/consolenotifications.ignore b/crd-catalog/openshift/api/console.openshift.io/v1/consolenotifications.ignore new file mode 100644 index 000000000..6435f3465 --- /dev/null +++ b/crd-catalog/openshift/api/console.openshift.io/v1/consolenotifications.ignore @@ -0,0 +1 @@ +cannot find type `ConsoleNotificationStatus` in this scope diff --git a/crd-catalog/openshift/api/console.openshift.io/v1/consolenotifications.yaml b/crd-catalog/openshift/api/console.openshift.io/v1/consolenotifications.yaml new file mode 100644 index 000000000..af4cefb2d --- /dev/null +++ b/crd-catalog/openshift/api/console.openshift.io/v1/consolenotifications.yaml @@ -0,0 +1,84 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 + capability.openshift.io/name: Console + description: Extension for configuring openshift web console notifications. 
+ displayName: ConsoleNotification + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: consolenotifications.console.openshift.io +spec: + group: console.openshift.io + names: + kind: ConsoleNotification + listKind: ConsoleNotificationList + plural: consolenotifications + singular: consolenotification + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.text + name: Text + type: string + - jsonPath: .spec.location + name: Location + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: "ConsoleNotification is the extension for configuring openshift web console notifications. \n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleNotificationSpec is the desired console notification configuration. + properties: + backgroundColor: + description: backgroundColor is the color of the background for the notification as CSS data type color. 
+ type: string + color: + description: color is the color of the text for the notification as CSS data type color. + type: string + link: + description: link is an object that holds notification link details. + properties: + href: + description: href is the absolute secure URL for the link (must use https) + pattern: ^https:// + type: string + text: + description: text is the display text for the link + type: string + required: + - href + - text + type: object + location: + description: 'location is the location of the notification in the console. Valid values are: "BannerTop", "BannerBottom", "BannerTopBottom".' + pattern: ^(BannerTop|BannerBottom|BannerTopBottom)$ + type: string + text: + description: text is the visible text of the notification. + type: string + required: + - text + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/console.openshift.io/v1/consoleplugins.args b/crd-catalog/openshift/api/console.openshift.io/v1/consoleplugins.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/console.openshift.io/v1/consoleplugins.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/console.openshift.io/v1/consoleplugins.yaml b/crd-catalog/openshift/api/console.openshift.io/v1/consoleplugins.yaml new file mode 100644 index 000000000..86f778c49 --- /dev/null +++ b/crd-catalog/openshift/api/console.openshift.io/v1/consoleplugins.yaml @@ -0,0 +1,182 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1186 + capability.openshift.io/name: Console + description: Extension for configuring openshift web console plugins. 
+ displayName: ConsolePlugin + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + service.beta.openshift.io/inject-cabundle: 'true' + name: consoleplugins.console.openshift.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: webhook + namespace: openshift-console-operator + path: /crdconvert + port: 9443 + conversionReviewVersions: + - v1 + - v1alpha1 + group: console.openshift.io + names: + kind: ConsolePlugin + listKind: ConsolePluginList + plural: consoleplugins + singular: consoleplugin + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ConsolePlugin is an extension for customizing OpenShift web console by dynamically loading code from another service running on the cluster. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsolePluginSpec is the desired plugin configuration. + properties: + backend: + description: backend holds the configuration of backend which is serving console's plugin . 
+ properties: + service: + description: service is a Kubernetes Service that exposes the plugin using a deployment with an HTTP server. The Service must use HTTPS and Service serving certificate. The console backend will proxy the plugins assets from the Service using the service CA bundle. + properties: + basePath: + default: / + description: basePath is the path to the plugin's assets. The primary asset it the manifest file called `plugin-manifest.json`, which is a JSON document that contains metadata about the plugin and the extensions. + maxLength: 256 + minLength: 1 + pattern: ^[a-zA-Z0-9.\-_~!$&'()*+,;=:@\/]*$ + type: string + name: + description: name of Service that is serving the plugin assets. + maxLength: 128 + minLength: 1 + type: string + namespace: + description: namespace of Service that is serving the plugin assets. + maxLength: 128 + minLength: 1 + type: string + port: + description: port on which the Service that is serving the plugin is listening to. + format: int32 + maximum: 65535.0 + minimum: 1.0 + type: integer + required: + - name + - namespace + - port + type: object + type: + description: "type is the backend type which servers the console's plugin. Currently only \"Service\" is supported. \n ---" + enum: + - Service + type: string + required: + - type + type: object + displayName: + description: displayName is the display name of the plugin. The dispalyName should be between 1 and 128 characters. + maxLength: 128 + minLength: 1 + type: string + i18n: + description: i18n is the configuration of plugin's localization resources. + properties: + loadType: + description: loadType indicates how the plugin's localization resource should be loaded. Valid values are Preload, Lazy and the empty string. When set to Preload, all localization resources are fetched when the plugin is loaded. When set to Lazy, localization resources are lazily loaded as and when they are required by the console. 
When omitted or set to the empty string, the behaviour is equivalent to Lazy type. + enum: + - Preload + - Lazy + - '' + type: string + required: + - loadType + type: object + proxy: + description: proxy is a list of proxies that describe various service type to which the plugin needs to connect to. + items: + description: ConsolePluginProxy holds information on various service types to which console's backend will proxy the plugin's requests. + properties: + alias: + description: "alias is a proxy name that identifies the plugin's proxy. An alias name should be unique per plugin. The console backend exposes following proxy endpoint: \n /api/proxy/plugin///? \n Request example path: \n /api/proxy/plugin/acm/search/pods?namespace=openshift-apiserver" + maxLength: 128 + minLength: 1 + pattern: ^[A-Za-z0-9-_]+$ + type: string + authorization: + default: None + description: authorization provides information about authorization type, which the proxied request should contain + enum: + - UserToken + - None + type: string + caCertificate: + description: caCertificate provides the cert authority certificate contents, in case the proxied Service is using custom service CA. By default, the service CA bundle provided by the service-ca operator is used. + pattern: ^-----BEGIN CERTIFICATE-----([\s\S]*)-----END CERTIFICATE-----\s?$ + type: string + endpoint: + description: endpoint provides information about endpoint to which the request is proxied to. + properties: + service: + description: 'service is an in-cluster Service that the plugin will connect to. The Service must use HTTPS. The console backend exposes an endpoint in order to proxy communication between the plugin and the Service. Note: service field is required for now, since currently only "Service" type is supported.' + properties: + name: + description: name of Service that the plugin needs to connect to. 
+ maxLength: 128 + minLength: 1 + type: string + namespace: + description: namespace of Service that the plugin needs to connect to + maxLength: 128 + minLength: 1 + type: string + port: + description: port on which the Service that the plugin needs to connect to is listening on. + format: int32 + maximum: 65535.0 + minimum: 1.0 + type: integer + required: + - name + - namespace + - port + type: object + type: + description: "type is the type of the console plugin's proxy. Currently only \"Service\" is supported. \n ---" + enum: + - Service + type: string + required: + - type + type: object + required: + - alias + - endpoint + type: object + type: array + required: + - backend + - displayName + type: object + required: + - metadata + - spec + type: object + served: true + storage: false diff --git a/crd-catalog/openshift/api/console.openshift.io/v1/consolequickstarts.yaml b/crd-catalog/openshift/api/console.openshift.io/v1/consolequickstarts.yaml new file mode 100644 index 000000000..0d2e33c19 --- /dev/null +++ b/crd-catalog/openshift/api/console.openshift.io/v1/consolequickstarts.yaml @@ -0,0 +1,165 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/750 + capability.openshift.io/name: Console + description: Extension for guiding user through various workflows in the OpenShift web console. 
+ displayName: ConsoleQuickStart + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: consolequickstarts.console.openshift.io +spec: + group: console.openshift.io + names: + kind: ConsoleQuickStart + listKind: ConsoleQuickStartList + plural: consolequickstarts + singular: consolequickstart + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ConsoleQuickStart is an extension for guiding user through various workflows in the OpenShift web console. \n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleQuickStartSpec is the desired quick start configuration. + properties: + accessReviewResources: + description: accessReviewResources contains a list of resources that the user's access will be reviewed against in order for the user to complete the Quick Start. The Quick Start will be hidden if any of the access reviews fail. 
+ items: + description: ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface + properties: + group: + description: Group is the API Group of the Resource. "*" means all. + type: string + name: + description: Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + type: string + namespace: + description: Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces "" (empty) is defaulted for LocalSubjectAccessReviews "" (empty) is empty for cluster-scoped resources "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + type: string + resource: + description: Resource is one of the existing resource types. "*" means all. + type: string + subresource: + description: Subresource is one of the existing resource types. "" means none. + type: string + verb: + description: 'Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.' + type: string + version: + description: Version is the API Version of the Resource. "*" means all. + type: string + type: object + type: array + conclusion: + description: conclusion sums up the Quick Start and suggests the possible next steps. (includes markdown) + type: string + description: + description: description is the description of the Quick Start. (includes markdown) + maxLength: 256 + minLength: 1 + type: string + displayName: + description: displayName is the display name of the Quick Start. + minLength: 1 + type: string + durationMinutes: + description: durationMinutes describes approximately how many minutes it will take to complete the Quick Start. + minimum: 1.0 + type: integer + icon: + description: icon is a base64 encoded image that will be displayed beside the Quick Start display name. 
The icon should be a vector image for easy scaling. The size of the icon should be 40x40. + type: string + introduction: + description: introduction describes the purpose of the Quick Start. (includes markdown) + minLength: 1 + type: string + nextQuickStart: + description: nextQuickStart is a list of the following Quick Starts, suggested for the user to try. + items: + type: string + type: array + prerequisites: + description: prerequisites contains all prerequisites that need to be met before taking a Quick Start. (includes markdown) + items: + type: string + type: array + tags: + description: tags is a list of strings that describe the Quick Start. + items: + type: string + type: array + tasks: + description: tasks is the list of steps the user has to perform to complete the Quick Start. + items: + description: ConsoleQuickStartTask is a single step in a Quick Start. + properties: + description: + description: description describes the steps needed to complete the task. (includes markdown) + minLength: 1 + type: string + review: + description: review contains instructions to validate the task is complete. The user will select 'Yes' or 'No' using a radio button, which indicates whether the step was completed successfully. + properties: + failedTaskHelp: + description: failedTaskHelp contains suggestions for a failed task review and is shown at the end of task. (includes markdown) + minLength: 1 + type: string + instructions: + description: instructions contains steps that user needs to take in order to validate his work after going through a task. (includes markdown) + minLength: 1 + type: string + required: + - failedTaskHelp + - instructions + type: object + summary: + description: summary contains information about the passed step. + properties: + failed: + description: failed briefly describes the unsuccessfully passed task. 
(includes markdown) + maxLength: 128 + minLength: 1 + type: string + success: + description: success describes the successfully passed task. + minLength: 1 + type: string + required: + - failed + - success + type: object + title: + description: title describes the task and is displayed as a step heading. + minLength: 1 + type: string + required: + - description + - title + type: object + minItems: 1 + type: array + required: + - description + - displayName + - durationMinutes + - introduction + - tasks + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/crd-catalog/openshift/api/console.openshift.io/v1/consolesamples.yaml b/crd-catalog/openshift/api/console.openshift.io/v1/consolesamples.yaml new file mode 100644 index 000000000..6fd1c55a6 --- /dev/null +++ b/crd-catalog/openshift/api/console.openshift.io/v1/consolesamples.yaml @@ -0,0 +1,167 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 + capability.openshift.io/name: Console + description: ConsoleSample is an extension to customizing OpenShift web console by adding samples. + displayName: ConsoleSample + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: consolesamples.console.openshift.io +spec: + group: console.openshift.io + names: + kind: ConsoleSample + listKind: ConsoleSampleList + plural: consolesamples + singular: consolesample + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ConsoleSample is an extension to customizing OpenShift web console by adding samples. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec contains configuration for a console sample. + properties: + abstract: + description: "abstract is a short introduction to the sample. \n It is required and must be no more than 100 characters in length. \n The abstract is shown on the sample card tile below the title and provider and is limited to three lines of content." + maxLength: 100 + type: string + description: + description: "description is a long form explanation of the sample. \n It is required and can have a maximum length of **4096** characters. \n It is a README.md-like content for additional information, links, pre-conditions, and other instructions. It will be rendered as Markdown so that it can contain line breaks, links, and other simple formatting." + maxLength: 4096 + type: string + icon: + description: "icon is an optional base64 encoded image and shown beside the sample title. \n The format must follow the data: URL format and can have a maximum size of **10 KB**. \n data:[][;base64], \n For example: \n data:image;base64, plus the base64 encoded image. \n Vector images can also be used. SVG icons must start with: \n data:image/svg+xml;base64, plus the base64 encoded SVG image. 
\n All sample catalog icons will be shown on a white background (also when the dark theme is used). The web console ensures that different aspect ratios work correctly. Currently, the surface of the icon is at most 40x100px. \n For more information on the data URL format, please visit https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs." + maxLength: 14000 + pattern: ^data:([a-z/\.+0-9]*;(([-a-zA-Z0-9=])*;)?)?base64, + type: string + provider: + description: "provider is an optional label to honor who provides the sample. \n It is optional and must be no more than 50 characters in length. \n A provider can be a company like \"Red Hat\" or an organization like \"CNCF\" or \"Knative\". \n Currently, the provider is only shown on the sample card tile below the title with the prefix \"Provided by \"" + maxLength: 50 + type: string + source: + description: source defines where to deploy the sample service from. The sample may be sourced from an external git repository or container image. + properties: + containerImport: + description: containerImport allows the user import a container image. + properties: + image: + description: "reference to a container image that provides a HTTP service. The service must be exposed on the default port (8080) unless otherwise configured with the port field. \n Supported formats: - / - docker.io// - quay.io// - quay.io//@sha256: - quay.io//:" + maxLength: 256 + minLength: 1 + type: string + service: + default: + targetPort: 8080 + description: service contains configuration for the Service resource created for this sample. + properties: + targetPort: + default: 8080 + description: targetPort is the port that the service listens on for HTTP requests. This port will be used for Service and Route created for this sample. Port must be in the range 1 to 65535. Default port is 8080. 
+ format: int32 + maximum: 65535.0 + minimum: 1.0 + type: integer + type: object + required: + - image + type: object + gitImport: + description: gitImport allows the user to import code from a git repository. + properties: + repository: + description: repository contains the reference to the actual Git repository. + properties: + contextDir: + description: contextDir is used to specify a directory within the repository to build the component. Must start with `/` and have a maximum length of 256 characters. When omitted, the default value is to build from the root of the repository. + maxLength: 256 + pattern: ^/ + type: string + revision: + description: revision is the git revision at which to clone the git repository Can be used to clone a specific branch, tag or commit SHA. Must be at most 256 characters in length. When omitted the repository's default branch is used. + maxLength: 256 + type: string + url: + description: "url of the Git repository that contains a HTTP service. The HTTP service must be exposed on the default port (8080) unless otherwise configured with the port field. \n Only public repositories on GitHub, GitLab and Bitbucket are currently supported: \n - https://github.com// - https://gitlab.com// - https://bitbucket.org// \n The url must have a maximum length of 256 characters." + maxLength: 256 + minLength: 1 + pattern: ^https:\/\/(github.com|gitlab.com|bitbucket.org)\/[a-zA-Z0-9-]+\/[a-zA-Z0-9-]+(.git)?$ + type: string + required: + - url + type: object + service: + default: + targetPort: 8080 + description: service contains configuration for the Service resource created for this sample. + properties: + targetPort: + default: 8080 + description: targetPort is the port that the service listens on for HTTP requests. This port will be used for Service created for this sample. Port must be in the range 1 to 65535. Default port is 8080. 
+ format: int32 + maximum: 65535.0 + minimum: 1.0 + type: integer + type: object + required: + - repository + type: object + type: + allOf: + - enum: + - GitImport + - ContainerImport + - enum: + - GitImport + - ContainerImport + description: 'type of the sample, currently supported: "GitImport";"ContainerImport"' + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: source.gitImport is required when source.type is GitImport, and forbidden otherwise + rule: 'self.type == ''GitImport'' ? has(self.gitImport) : !has(self.gitImport)' + - message: source.containerImport is required when source.type is ContainerImport, and forbidden otherwise + rule: 'self.type == ''ContainerImport'' ? has(self.containerImport) : !has(self.containerImport)' + tags: + description: "tags are optional string values that can be used to find samples in the samples catalog. \n Examples of common tags may be \"Java\", \"Quarkus\", etc. \n They will be displayed on the samples details page." + items: + type: string + maxItems: 10 + type: array + x-kubernetes-list-type: set + title: + description: "title is the display name of the sample. \n It is required and must be no more than 50 characters in length." + maxLength: 50 + minLength: 1 + type: string + type: + description: "type is an optional label to group multiple samples. \n It is optional and must be no more than 20 characters in length. \n Recommendation is a singular term like \"Builder Image\", \"Devfile\" or \"Serverless Function\". \n Currently, the type is shown a badge on the sample card tile in the top right corner." 
+ maxLength: 20 + type: string + required: + - abstract + - description + - source + - title + type: object + required: + - metadata + - spec + type: object + served: true + storage: true diff --git a/crd-catalog/openshift/api/console.openshift.io/v1/consoleyamlsamples.yaml b/crd-catalog/openshift/api/console.openshift.io/v1/consoleyamlsamples.yaml new file mode 100644 index 000000000..c2cc79fe9 --- /dev/null +++ b/crd-catalog/openshift/api/console.openshift.io/v1/consoleyamlsamples.yaml @@ -0,0 +1,74 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 + capability.openshift.io/name: Console + description: Extension for configuring openshift web console YAML samples. + displayName: ConsoleYAMLSample + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: consoleyamlsamples.console.openshift.io +spec: + group: console.openshift.io + names: + kind: ConsoleYAMLSample + listKind: ConsoleYAMLSampleList + plural: consoleyamlsamples + singular: consoleyamlsample + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ConsoleYAMLSample is an extension for customizing OpenShift web console YAML samples. \n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleYAMLSampleSpec is the desired YAML sample configuration. Samples will appear with their descriptions in a samples sidebar when creating a resources in the web console. + properties: + description: + description: description of the YAML sample. + pattern: ^(.|\s)*\S(.|\s)*$ + type: string + snippet: + description: snippet indicates that the YAML sample is not the full YAML resource definition, but a fragment that can be inserted into the existing YAML document at the user's cursor. + type: boolean + targetResource: + description: targetResource contains apiVersion and kind of the resource YAML sample is representating. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + type: object + title: + description: title of the YAML sample. + pattern: ^(.|\s)*\S(.|\s)*$ + type: string + yaml: + description: yaml is the YAML sample to display. 
+ pattern: ^(.|\s)*\S(.|\s)*$ + type: string + required: + - description + - targetResource + - title + - yaml + type: object + required: + - metadata + - spec + type: object + served: true + storage: true diff --git a/crd-catalog/openshift/api/console.openshift.io/v1alpha1/consoleplugins.yaml b/crd-catalog/openshift/api/console.openshift.io/v1alpha1/consoleplugins.yaml new file mode 100644 index 000000000..89761cfda --- /dev/null +++ b/crd-catalog/openshift/api/console.openshift.io/v1alpha1/consoleplugins.yaml @@ -0,0 +1,146 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1186 + capability.openshift.io/name: Console + description: Extension for configuring openshift web console plugins. + displayName: ConsolePlugin + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + service.beta.openshift.io/inject-cabundle: 'true' + name: consoleplugins.console.openshift.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: webhook + namespace: openshift-console-operator + path: /crdconvert + port: 9443 + conversionReviewVersions: + - v1 + - v1alpha1 + group: console.openshift.io + names: + kind: ConsolePlugin + listKind: ConsolePluginList + plural: consoleplugins + singular: consoleplugin + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "ConsolePlugin is an extension for customizing OpenShift web console by dynamically loading code from another service running on the cluster. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsolePluginSpec is the desired plugin configuration. + properties: + displayName: + description: displayName is the display name of the plugin. + minLength: 1 + type: string + proxy: + description: proxy is a list of proxies that describe various service type to which the plugin needs to connect to. + items: + description: ConsolePluginProxy holds information on various service types to which console's backend will proxy the plugin's requests. + properties: + alias: + description: "alias is a proxy name that identifies the plugin's proxy. An alias name should be unique per plugin. The console backend exposes following proxy endpoint: \n /api/proxy/plugin///? \n Request example path: \n /api/proxy/plugin/acm/search/pods?namespace=openshift-apiserver" + maxLength: 128 + minLength: 1 + pattern: ^[A-Za-z0-9-_]+$ + type: string + authorize: + default: false + description: "authorize indicates if the proxied request should contain the logged-in user's OpenShift access token in the \"Authorization\" request header. For example: \n Authorization: Bearer sha256~kV46hPnEYhCWFnB85r5NrprAxggzgb6GOeLbgcKNsH0 \n By default the access token is not part of the proxied request." 
+ type: boolean + caCertificate: + description: caCertificate provides the cert authority certificate contents, in case the proxied Service is using custom service CA. By default, the service CA bundle provided by the service-ca operator is used. + pattern: ^-----BEGIN CERTIFICATE-----([\s\S]*)-----END CERTIFICATE-----\s?$ + type: string + service: + description: 'service is an in-cluster Service that the plugin will connect to. The Service must use HTTPS. The console backend exposes an endpoint in order to proxy communication between the plugin and the Service. Note: service field is required for now, since currently only "Service" type is supported.' + properties: + name: + description: name of Service that the plugin needs to connect to. + maxLength: 128 + minLength: 1 + type: string + namespace: + description: namespace of Service that the plugin needs to connect to + maxLength: 128 + minLength: 1 + type: string + port: + description: port on which the Service that the plugin needs to connect to is listening on. + format: int32 + maximum: 65535.0 + minimum: 1.0 + type: integer + required: + - name + - namespace + - port + type: object + type: + description: type is the type of the console plugin's proxy. Currently only "Service" is supported. + pattern: ^(Service)$ + type: string + required: + - alias + - type + type: object + type: array + service: + description: service is a Kubernetes Service that exposes the plugin using a deployment with an HTTP server. The Service must use HTTPS and Service serving certificate. The console backend will proxy the plugins assets from the Service using the service CA bundle. + properties: + basePath: + default: / + description: basePath is the path to the plugin's assets. The primary asset it the manifest file called `plugin-manifest.json`, which is a JSON document that contains metadata about the plugin and the extensions. 
+ minLength: 1 + pattern: ^/ + type: string + name: + description: name of Service that is serving the plugin assets. + maxLength: 128 + minLength: 1 + type: string + namespace: + description: namespace of Service that is serving the plugin assets. + maxLength: 128 + minLength: 1 + type: string + port: + description: port on which the Service that is serving the plugin is listening to. + format: int32 + maximum: 65535.0 + minimum: 1.0 + type: integer + required: + - basePath + - name + - namespace + - port + type: object + required: + - service + type: object + required: + - metadata + - spec + type: object + served: true + storage: true diff --git a/crd-catalog/openshift/api/controlplane.operator.openshift.io/v1alpha1/podnetworkconnectivitychecks.yaml b/crd-catalog/openshift/api/controlplane.operator.openshift.io/v1alpha1/podnetworkconnectivitychecks.yaml new file mode 100644 index 000000000..7173dbe9e --- /dev/null +++ b/crd-catalog/openshift/api/controlplane.operator.openshift.io/v1alpha1/podnetworkconnectivitychecks.yaml @@ -0,0 +1,227 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/639 + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: podnetworkconnectivitychecks.controlplane.operator.openshift.io +spec: + group: controlplane.operator.openshift.io + names: + kind: PodNetworkConnectivityCheck + listKind: PodNetworkConnectivityCheckList + plural: podnetworkconnectivitychecks + singular: podnetworkconnectivitycheck + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "PodNetworkConnectivityCheck \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the source and target of the connectivity check + properties: + sourcePod: + description: SourcePod names the pod from which the condition will be checked + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + targetEndpoint: + description: EndpointAddress to check. A TCP address of the form host:port. Note that if host is a DNS name, then the check would fail if the DNS name cannot be resolved. Specify an IP address for host to bypass DNS name lookup. + pattern: ^\S+:\d*$ + type: string + tlsClientCert: + description: TLSClientCert, if specified, references a kubernetes.io/tls type secret with 'tls.crt' and 'tls.key' entries containing an optional TLS client certificate and key to be used when checking endpoints that require a client certificate in order to gracefully preform the scan without causing excessive logging in the endpoint process. The secret must exist in the same namespace as this resource. 
+ properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + required: + - sourcePod + - targetEndpoint + type: object + status: + description: Status contains the observed status of the connectivity check + properties: + conditions: + description: Conditions summarize the status of the check + items: + description: PodNetworkConnectivityCheckCondition represents the overall status of the pod network connectivity. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + format: date-time + nullable: true + type: string + message: + description: Message indicating details about last transition in a human readable format. + type: string + reason: + description: Reason for the condition's last status transition in a machine readable format. + type: string + status: + description: Status of the condition + type: string + type: + description: Type of the condition + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + failures: + description: Failures contains logs of unsuccessful check actions + items: + description: LogEntry records events + properties: + latency: + description: Latency records how long the action mentioned in the entry took. + nullable: true + type: string + message: + description: Message explaining status in a human readable format. + type: string + reason: + description: Reason for status in a machine readable format. + type: string + success: + description: Success indicates if the log entry indicates a success or failure. + type: boolean + time: + description: Start time of check action. 
+ format: date-time + nullable: true + type: string + required: + - success + - time + type: object + type: array + outages: + description: Outages contains logs of time periods of outages + items: + description: OutageEntry records time period of an outage + properties: + end: + description: End of outage detected + format: date-time + nullable: true + type: string + endLogs: + description: EndLogs contains log entries related to the end of this outage. Should contain the success entry that resolved the outage and possibly a few of the failure log entries that preceded it. + items: + description: LogEntry records events + properties: + latency: + description: Latency records how long the action mentioned in the entry took. + nullable: true + type: string + message: + description: Message explaining status in a human readable format. + type: string + reason: + description: Reason for status in a machine readable format. + type: string + success: + description: Success indicates if the log entry indicates a success or failure. + type: boolean + time: + description: Start time of check action. + format: date-time + nullable: true + type: string + required: + - success + - time + type: object + type: array + message: + description: Message summarizes outage details in a human readable format. + type: string + start: + description: Start of outage detected + format: date-time + nullable: true + type: string + startLogs: + description: StartLogs contains log entries related to the start of this outage. Should contain the original failure, any entries where the failure mode changed. + items: + description: LogEntry records events + properties: + latency: + description: Latency records how long the action mentioned in the entry took. + nullable: true + type: string + message: + description: Message explaining status in a human readable format. + type: string + reason: + description: Reason for status in a machine readable format. 
+ type: string + success: + description: Success indicates if the log entry indicates a success or failure. + type: boolean + time: + description: Start time of check action. + format: date-time + nullable: true + type: string + required: + - success + - time + type: object + type: array + required: + - start + type: object + type: array + successes: + description: Successes contains logs successful check actions + items: + description: LogEntry records events + properties: + latency: + description: Latency records how long the action mentioned in the entry took. + nullable: true + type: string + message: + description: Message explaining status in a human readable format. + type: string + reason: + description: Reason for status in a machine readable format. + type: string + success: + description: Success indicates if the log entry indicates a success or failure. + type: boolean + time: + description: Start time of check action. + format: date-time + nullable: true + type: string + required: + - success + - time + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/example.openshift.io/v1/stableconfigtypes.args b/crd-catalog/openshift/api/example.openshift.io/v1/stableconfigtypes.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/example.openshift.io/v1/stableconfigtypes.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/example.openshift.io/v1/stableconfigtypes.yaml b/crd-catalog/openshift/api/example.openshift.io/v1/stableconfigtypes.yaml new file mode 100644 index 000000000..0fe8d5b5d --- /dev/null +++ b/crd-catalog/openshift/api/example.openshift.io/v1/stableconfigtypes.yaml @@ -0,0 +1,147 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/xxx + 
include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/feature-set: Default + name: stableconfigtypes.example.openshift.io +spec: + group: example.openshift.io + names: + kind: StableConfigType + listKind: StableConfigTypeList + plural: stableconfigtypes + singular: stableconfigtype + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "StableConfigType is a stable config type that may include TechPreviewNoUpgrade fields. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the StableConfigType. + properties: + celUnion: + description: celUnion demonstrates how to validate a discrminated union using CEL + properties: + optionalMember: + description: optionalMember is a union member that is optional. + type: string + requiredMember: + description: requiredMember is a union member that is required. + type: string + type: + description: type determines which of the union members should be populated. 
+ enum: + - RequiredMember + - OptionalMember + - EmptyMember + type: string + type: object + x-kubernetes-validations: + - message: requiredMember is required when type is RequiredMember, and forbidden otherwise + rule: 'has(self.type) && self.type == ''RequiredMember'' ? has(self.requiredMember) : !has(self.requiredMember)' + - message: optionalMember is forbidden when type is not OptionalMember + rule: 'has(self.type) && self.type == ''OptionalMember'' ? true : !has(self.optionalMember)' + evolvingUnion: + description: evolvingUnion demonstrates how to phase in new values into discriminated union + properties: + type: + description: type is the discriminator. It has different values for Default and for TechPreviewNoUpgrade + enum: + - '' + - StableValue + type: string + type: object + immutableField: + description: immutableField is a field that is immutable once the object has been created. It is required at all times. + type: string + x-kubernetes-validations: + - message: immutableField is immutable + rule: self == oldSelf + optionalImmutableField: + description: optionalImmutableField is a field that is immutable once set. It is optional but may not be changed once set. + type: string + x-kubernetes-validations: + - message: optionalImmutableField is immutable once set + rule: oldSelf == '' || self == oldSelf + stableField: + description: "stableField is a field that is present on default clusters and on tech preview clusters \n If empty, the platform will choose a good default, which may change over time without notice." + type: string + required: + - immutableField + type: object + status: + description: status is the most recently observed status of the StableConfigType. + properties: + conditions: + description: 'Represents the observations of a foo''s current state. Known .status.conditions.type are: "Available", "Progressing", and "Degraded"' + items: + description: "Condition contains details for one aspect of the current state of this API Resource. 
--- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. 
+ enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + immutableField: + description: immutableField is a field that is immutable once the object has been created. It is required at all times. + type: string + x-kubernetes-validations: + - message: immutableField is immutable + rule: self == oldSelf + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/helm.openshift.io/v1beta1/helmchartrepositories.args b/crd-catalog/openshift/api/helm.openshift.io/v1beta1/helmchartrepositories.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/helm.openshift.io/v1beta1/helmchartrepositories.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/helm.openshift.io/v1beta1/helmchartrepositories.yaml b/crd-catalog/openshift/api/helm.openshift.io/v1beta1/helmchartrepositories.yaml new file mode 100644 index 000000000..1e2c2d586 --- /dev/null +++ b/crd-catalog/openshift/api/helm.openshift.io/v1beta1/helmchartrepositories.yaml @@ -0,0 +1,130 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/598 + include.release.openshift.io/ibm-cloud-managed: 'true' + 
include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: helmchartrepositories.helm.openshift.io +spec: + group: helm.openshift.io + names: + kind: HelmChartRepository + listKind: HelmChartRepositoryList + plural: helmchartrepositories + singular: helmchartrepository + scope: Cluster + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: "HelmChartRepository holds cluster-wide configuration for proxied Helm chart repository \n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + connectionConfig: + description: Required configuration for connecting to the chart repo + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca-bundle.crt" is used to locate the data. If empty, the default system roots are used. The namespace for this config map is openshift-config. 
+ properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + tlsClientConfig: + description: tlsClientConfig is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate and private key to present when connecting to the server. The key "tls.crt" is used to locate the client certificate. The key "tls.key" is used to locate the private key. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + url: + description: Chart repository URL + maxLength: 2048 + pattern: ^https?:\/\/ + type: string + type: object + description: + description: Optional human readable repository description, it can be used by UI for displaying purposes + maxLength: 2048 + minLength: 1 + type: string + disabled: + description: If set to true, disable the repo usage in the cluster/namespace + type: boolean + name: + description: Optional associated human readable repository name, it can be used by UI for displaying purposes + maxLength: 100 + minLength: 1 + type: string + type: object + status: + description: Observed status of the repository within the cluster.. + properties: + conditions: + description: conditions is a list of conditions and their statuses + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/helm.openshift.io/v1beta1/projecthelmchartrepositories.args b/crd-catalog/openshift/api/helm.openshift.io/v1beta1/projecthelmchartrepositories.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/helm.openshift.io/v1beta1/projecthelmchartrepositories.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/helm.openshift.io/v1beta1/projecthelmchartrepositories.yaml b/crd-catalog/openshift/api/helm.openshift.io/v1beta1/projecthelmchartrepositories.yaml new file mode 100644 index 000000000..4c1a2eb20 --- /dev/null +++ b/crd-catalog/openshift/api/helm.openshift.io/v1beta1/projecthelmchartrepositories.yaml @@ -0,0 +1,139 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1084 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: projecthelmchartrepositories.helm.openshift.io +spec: + group: helm.openshift.io + names: + kind: ProjectHelmChartRepository + listKind: ProjectHelmChartRepositoryList + plural: projecthelmchartrepositories + singular: projecthelmchartrepository + scope: Namespaced 
+ versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: "ProjectHelmChartRepository holds namespace-wide configuration for proxied Helm chart repository \n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + connectionConfig: + description: Required configuration for connecting to the chart repo + properties: + basicAuthConfig: + description: basicAuthConfig is an optional reference to a secret by name that contains the basic authentication credentials to present when connecting to the server. The key "username" is used locate the username. The key "password" is used to locate the password. The namespace for this secret must be same as the namespace where the project helm chart repository is getting instantiated. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. 
The key "ca-bundle.crt" is used to locate the data. If empty, the default system roots are used. The namespace for this configmap must be same as the namespace where the project helm chart repository is getting instantiated. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + tlsClientConfig: + description: tlsClientConfig is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate and private key to present when connecting to the server. The key "tls.crt" is used to locate the client certificate. The key "tls.key" is used to locate the private key. The namespace for this secret must be same as the namespace where the project helm chart repository is getting instantiated. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + url: + description: Chart repository URL + maxLength: 2048 + pattern: ^https?:\/\/ + type: string + type: object + description: + description: Optional human readable repository description, it can be used by UI for displaying purposes + maxLength: 2048 + minLength: 1 + type: string + disabled: + description: If set to true, disable the repo usage in the namespace + type: boolean + name: + description: Optional associated human readable repository name, it can be used by UI for displaying purposes + maxLength: 100 + minLength: 1 + type: string + type: object + status: + description: Observed status of the repository within the namespace.. + properties: + conditions: + description: conditions is a list of conditions and their statuses + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/imageregistry.operator.openshift.io/v1/configs.yaml b/crd-catalog/openshift/api/imageregistry.operator.openshift.io/v1/configs.yaml new file mode 100644 index 000000000..497ef6091 --- /dev/null +++ b/crd-catalog/openshift/api/imageregistry.operator.openshift.io/v1/configs.yaml @@ -0,0 +1,1351 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/519 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: configs.imageregistry.operator.openshift.io +spec: + group: imageregistry.operator.openshift.io + names: + kind: Config + listKind: ConfigList + plural: configs + singular: config + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Config is the configuration object for a registry instance managed by the registry operator \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ImageRegistrySpec defines the specs for the running registry. + properties: + affinity: + description: affinity is a group of node affinity scheduling rules for the image registry pod(s). + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. 
+ items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. 
+ properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". 
An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + defaultRoute: + description: defaultRoute indicates whether an external facing route for the registry should be created using the default generated hostname. + type: boolean + disableRedirect: + description: disableRedirect controls whether to route all data through the Registry, rather than redirecting to the backend. + type: boolean + httpSecret: + description: httpSecret is the value needed by the registry to secure uploads, generated by default. 
+ type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + logging: + description: logging is deprecated, use logLevel instead. + format: int64 + type: integer + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + nodeSelector: + additionalProperties: + type: string + description: nodeSelector defines the node selection constraints for the registry pod. + type: object + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + proxy: + description: proxy defines the proxy to be used when calling master api, upstream registries, etc. + properties: + http: + description: http defines the proxy to be used by the image registry when accessing HTTP endpoints. + type: string + https: + description: https defines the proxy to be used by the image registry when accessing HTTPS endpoints. 
+ type: string + noProxy: + description: noProxy defines a comma-separated list of host names that shouldn't go through any proxy. + type: string + type: object + readOnly: + description: readOnly indicates whether the registry instance should reject attempts to push new images or delete existing ones. + type: boolean + replicas: + description: replicas determines the number of registry instances to run. + format: int32 + type: integer + requests: + description: requests controls how many parallel requests a given registry instance will handle before queuing additional requests. + properties: + read: + description: read defines limits for image registry's reads. + properties: + maxInQueue: + description: maxInQueue sets the maximum queued api requests to the registry. + type: integer + maxRunning: + description: maxRunning sets the maximum in flight api requests to the registry. + type: integer + maxWaitInQueue: + description: maxWaitInQueue sets the maximum time a request can wait in the queue before being rejected. + format: duration + type: string + type: object + write: + description: write defines limits for image registry's writes. + properties: + maxInQueue: + description: maxInQueue sets the maximum queued api requests to the registry. + type: integer + maxRunning: + description: maxRunning sets the maximum in flight api requests to the registry. + type: integer + maxWaitInQueue: + description: maxWaitInQueue sets the maximum time a request can wait in the queue before being rejected. + format: duration + type: string + type: object + type: object + resources: + description: resources defines the resource requests+limits for the registry pod. + properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + rolloutStrategy: + description: rolloutStrategy defines rollout strategy for the image registry deployment. + pattern: ^(RollingUpdate|Recreate)$ + type: string + routes: + description: routes defines additional external facing routes which should be created for the registry. + items: + description: ImageRegistryConfigRoute holds information on external route access to image registry. + properties: + hostname: + description: hostname for the route. + type: string + name: + description: name of the route to be created. 
+ type: string + secretName: + description: secretName points to secret containing the certificates to be used by the route. + type: string + required: + - name + type: object + type: array + storage: + description: storage details for configuring registry storage, e.g. S3 bucket coordinates. + properties: + azure: + description: azure represents configuration that uses Azure Blob Storage. + properties: + accountName: + description: accountName defines the account to be used by the registry. + type: string + cloudName: + description: cloudName is the name of the Azure cloud environment to be used by the registry. If empty, the operator will set it based on the infrastructure object. + type: string + container: + description: container defines Azure's container to be used by registry. + maxLength: 63 + minLength: 3 + pattern: ^[0-9a-z]+(-[0-9a-z]+)*$ + type: string + networkAccess: + default: + type: External + description: 'networkAccess defines the network access properties for the storage account. Defaults to type: External.' + properties: + internal: + description: 'internal defines the vnet and subnet names to configure a private endpoint and connect it to the storage account in order to make it private. when type: Internal and internal is unset, the image registry operator will discover vnet and subnet names, and generate a private endpoint name.' + properties: + networkResourceGroupName: + description: networkResourceGroupName is the resource group name where the cluster's vnet and subnet are. When omitted, the registry operator will use the cluster resource group (from in the infrastructure status). If you set a networkResourceGroupName on your install-config.yaml, that value will be used automatically (for clusters configured with publish:Internal). Note that both vnet and subnet must be in the same resource group. It must be between 1 and 90 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) 
and underscores (_), and not end with a period. + maxLength: 90 + minLength: 1 + pattern: ^[0-9A-Za-z_.-](?:[0-9A-Za-z_.-]*[0-9A-Za-z_-])?$ + type: string + privateEndpointName: + description: privateEndpointName is the name of the private endpoint for the registry. When provided, the registry will use it as the name of the private endpoint it will create for the storage account. When omitted, the registry will generate one. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). It must start with an alphanumeric character and end with an alphanumeric character or an underscore. + maxLength: 64 + minLength: 2 + pattern: ^[0-9A-Za-z][0-9A-Za-z_.-]*[0-9A-Za-z_]$ + type: string + subnetName: + description: subnetName is the name of the subnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource, then using one of listed subnets. Advanced cluster network configurations that use network security groups to protect subnets should ensure the provided subnetName has access to Azure Storage service. It must be between 1 and 80 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). + maxLength: 80 + minLength: 1 + pattern: ^[0-9A-Za-z](?:[0-9A-Za-z_.-]*[0-9A-Za-z_])?$ + type: string + vnetName: + description: vnetName is the name of the vnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource. This tag is set automatically by the installer. Commonly, this will be the same vnet as the cluster. Advanced cluster network configurations should ensure the provided vnetName is the vnet of the nodes where the image registry pods are running from. 
It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). It must start with an alphanumeric character and end with an alphanumeric character or an underscore. + maxLength: 64 + minLength: 2 + pattern: ^[0-9A-Za-z][0-9A-Za-z_.-]*[0-9A-Za-z_]$ + type: string + type: object + type: + default: External + description: 'type is the network access level to be used for the storage account. type: Internal means the storage account will be private, type: External means the storage account will be publicly accessible. Internal storage accounts are only exposed within the cluster''s vnet. External storage accounts are publicly exposed on the internet. When type: Internal is used, a vnetName, subNetName and privateEndpointName may optionally be specified. If unspecificed, the image registry operator will discover vnet and subnet names, and generate a privateEndpointName. Defaults to "External".' + enum: + - Internal + - External + type: string + type: object + x-kubernetes-validations: + - message: internal is forbidden when type is not Internal + rule: 'has(self.type) && self.type == ''Internal'' ? true : !has(self.internal)' + type: object + emptyDir: + description: 'emptyDir represents ephemeral storage on the pod''s host node. WARNING: this storage cannot be used with more than 1 replica and is not suitable for production use. When the pod is removed from a node for any reason, the data in the emptyDir is deleted forever.' + type: object + gcs: + description: gcs represents configuration that uses Google Cloud Storage. + properties: + bucket: + description: bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. + type: string + keyID: + description: keyID is the KMS key ID to use for encryption. Optional, buckets are encrypted by default on GCP. This allows for the use of a custom encryption key. 
+ type: string + projectID: + description: projectID is the Project ID of the GCP project that this bucket should be associated with. + type: string + region: + description: region is the GCS location in which your bucket exists. Optional, will be set based on the installed GCS Region. + type: string + type: object + ibmcos: + description: ibmcos represents configuration that uses IBM Cloud Object Storage. + properties: + bucket: + description: bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. + type: string + location: + description: location is the IBM Cloud location in which your bucket exists. Optional, will be set based on the installed IBM Cloud location. + type: string + resourceGroupName: + description: resourceGroupName is the name of the IBM Cloud resource group that this bucket and its service instance is associated with. Optional, will be set based on the installed IBM Cloud resource group. + type: string + resourceKeyCRN: + description: resourceKeyCRN is the CRN of the IBM Cloud resource key that is created for the service instance. Commonly referred as a service credential and must contain HMAC type credentials. Optional, will be computed if not provided. + pattern: ^crn:.+:.+:.+:cloud-object-storage:.+:.+:.+:resource-key:.+$ + type: string + serviceInstanceCRN: + description: serviceInstanceCRN is the CRN of the IBM Cloud Object Storage service instance that this bucket is associated with. Optional, will be computed if not provided. + pattern: ^crn:.+:.+:.+:cloud-object-storage:.+:.+:.+::$ + type: string + type: object + managementState: + description: managementState indicates if the operator manages the underlying storage unit. If Managed the operator will remove the storage when this operator gets Removed. + pattern: ^(Managed|Unmanaged)$ + type: string + oss: + description: Oss represents configuration that uses Alibaba Cloud Object Storage Service. 
+ properties: + bucket: + description: Bucket is the bucket name in which you want to store the registry's data. About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm) Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be autogenerated in the form of -image-registry-- + maxLength: 63 + minLength: 3 + pattern: ^[0-9a-z]+(-[0-9a-z]+)*$ + type: string + encryption: + anyOf: + - not: + required: + - kms + properties: + method: + not: + enum: + - KMS + - properties: + method: + enum: + - KMS + required: + - kms + description: Encryption specifies whether you would like your data encrypted on the server side. More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm) + properties: + kms: + description: KMS (key management service) is an encryption type that holds the struct for KMS KeyID + properties: + keyID: + description: KeyID holds the KMS encryption key ID + minLength: 1 + type: string + required: + - keyID + type: object + method: + default: AES256 + description: Method defines the different encrytion modes available Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `AES256`. + enum: + - KMS + - AES256 + type: string + type: object + endpointAccessibility: + default: Internal + description: EndpointAccessibility specifies whether the registry use the OSS VPC internal endpoint Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `Internal`. + enum: + - Internal + - Public + - '' + type: string + region: + description: Region is the Alibaba Cloud Region in which your bucket exists. 
For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html). Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be based on the installed Alibaba Cloud Region. + type: string + type: object + pvc: + description: pvc represents configuration that uses a PersistentVolumeClaim. + properties: + claim: + description: claim defines the Persisent Volume Claim's name to be used. + type: string + type: object + s3: + description: s3 represents configuration that uses Amazon Simple Storage Service. + properties: + bucket: + description: bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. + type: string + cloudFront: + description: cloudFront configures Amazon Cloudfront as the storage middleware in a registry. + properties: + baseURL: + description: baseURL contains the SCHEME://HOST[/PATH] at which Cloudfront is served. + type: string + duration: + description: duration is the duration of the Cloudfront session. + format: duration + type: string + keypairID: + description: keypairID is key pair ID provided by AWS. + type: string + privateKey: + description: privateKey points to secret containing the private key, provided by AWS. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - baseURL + - keypairID + - privateKey + type: object + encrypt: + description: encrypt specifies whether the registry stores the image in encrypted format or not. Optional, defaults to false. + type: boolean + keyID: + description: keyID is the KMS key ID to use for encryption. Optional, Encrypt must be true, or this parameter is ignored. + type: string + region: + description: region is the AWS region in which your bucket exists. Optional, will be set based on the installed AWS Region. + type: string + regionEndpoint: + description: regionEndpoint is the endpoint for S3 compatible storage services. It should be a valid URL with scheme, e.g. https://s3.example.com. Optional, defaults based on the Region that is provided. + type: string + trustedCA: + description: "trustedCA is a reference to a config map containing a CA bundle. The image registry and its operator use certificates from this bundle to verify S3 server certificates. \n The namespace for the config map referenced by trustedCA is \"openshift-config\". The key for the bundle in the config map is \"ca-bundle.crt\"." + properties: + name: + description: name is the metadata.name of the referenced config map. This field must adhere to standard config map naming restrictions. The name must consist solely of alphanumeric characters, hyphens (-) and periods (.). It has a maximum length of 253 characters. If this field is not specified or is empty string, the default trust bundle will be used. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + type: object + virtualHostedStyle: + description: virtualHostedStyle enables using S3 virtual hosted style bucket paths with a custom RegionEndpoint Optional, defaults to false. 
+ type: boolean + type: object + swift: + description: swift represents configuration that uses OpenStack Object Storage. + properties: + authURL: + description: authURL defines the URL for obtaining an authentication token. + type: string + authVersion: + description: authVersion specifies the OpenStack Auth's version. + type: string + container: + description: container defines the name of Swift container where to store the registry's data. + type: string + domain: + description: domain specifies Openstack's domain name for Identity v3 API. + type: string + domainID: + description: domainID specifies Openstack's domain id for Identity v3 API. + type: string + regionName: + description: regionName defines Openstack's region in which container exists. + type: string + tenant: + description: tenant defines Openstack tenant name to be used by registry. + type: string + tenantID: + description: tenant defines Openstack tenant id to be used by registry. + type: string + type: object + type: object + tolerations: + description: tolerations defines the tolerations for the registry pod. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. 
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: topologySpreadConstraints specify how to spread matching pods among the given topology. + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. \n This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default)." + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. 
| zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)." 
+ format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. 
- ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - replicas + type: object + status: + description: ImageRegistryStatus reports image registry operational status. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + storage: + description: storage indicates the current applied storage configuration of the registry. + properties: + azure: + description: azure represents configuration that uses Azure Blob Storage. + properties: + accountName: + description: accountName defines the account to be used by the registry. + type: string + cloudName: + description: cloudName is the name of the Azure cloud environment to be used by the registry. If empty, the operator will set it based on the infrastructure object. 
+ type: string + container: + description: container defines Azure's container to be used by registry. + maxLength: 63 + minLength: 3 + pattern: ^[0-9a-z]+(-[0-9a-z]+)*$ + type: string + networkAccess: + default: + type: External + description: 'networkAccess defines the network access properties for the storage account. Defaults to type: External.' + properties: + internal: + description: 'internal defines the vnet and subnet names to configure a private endpoint and connect it to the storage account in order to make it private. when type: Internal and internal is unset, the image registry operator will discover vnet and subnet names, and generate a private endpoint name.' + properties: + networkResourceGroupName: + description: networkResourceGroupName is the resource group name where the cluster's vnet and subnet are. When omitted, the registry operator will use the cluster resource group (from in the infrastructure status). If you set a networkResourceGroupName on your install-config.yaml, that value will be used automatically (for clusters configured with publish:Internal). Note that both vnet and subnet must be in the same resource group. It must be between 1 and 90 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_), and not end with a period. + maxLength: 90 + minLength: 1 + pattern: ^[0-9A-Za-z_.-](?:[0-9A-Za-z_.-]*[0-9A-Za-z_-])?$ + type: string + privateEndpointName: + description: privateEndpointName is the name of the private endpoint for the registry. When provided, the registry will use it as the name of the private endpoint it will create for the storage account. When omitted, the registry will generate one. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). It must start with an alphanumeric character and end with an alphanumeric character or an underscore. 
+ maxLength: 64 + minLength: 2 + pattern: ^[0-9A-Za-z][0-9A-Za-z_.-]*[0-9A-Za-z_]$ + type: string + subnetName: + description: subnetName is the name of the subnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource, then using one of listed subnets. Advanced cluster network configurations that use network security groups to protect subnets should ensure the provided subnetName has access to Azure Storage service. It must be between 1 and 80 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). + maxLength: 80 + minLength: 1 + pattern: ^[0-9A-Za-z](?:[0-9A-Za-z_.-]*[0-9A-Za-z_])?$ + type: string + vnetName: + description: vnetName is the name of the vnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource. This tag is set automatically by the installer. Commonly, this will be the same vnet as the cluster. Advanced cluster network configurations should ensure the provided vnetName is the vnet of the nodes where the image registry pods are running from. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). It must start with an alphanumeric character and end with an alphanumeric character or an underscore. + maxLength: 64 + minLength: 2 + pattern: ^[0-9A-Za-z][0-9A-Za-z_.-]*[0-9A-Za-z_]$ + type: string + type: object + type: + default: External + description: 'type is the network access level to be used for the storage account. type: Internal means the storage account will be private, type: External means the storage account will be publicly accessible. Internal storage accounts are only exposed within the cluster''s vnet. External storage accounts are publicly exposed on the internet. 
When type: Internal is used, a vnetName, subNetName and privateEndpointName may optionally be specified. If unspecificed, the image registry operator will discover vnet and subnet names, and generate a privateEndpointName. Defaults to "External".' + enum: + - Internal + - External + type: string + type: object + x-kubernetes-validations: + - message: internal is forbidden when type is not Internal + rule: 'has(self.type) && self.type == ''Internal'' ? true : !has(self.internal)' + type: object + emptyDir: + description: 'emptyDir represents ephemeral storage on the pod''s host node. WARNING: this storage cannot be used with more than 1 replica and is not suitable for production use. When the pod is removed from a node for any reason, the data in the emptyDir is deleted forever.' + type: object + gcs: + description: gcs represents configuration that uses Google Cloud Storage. + properties: + bucket: + description: bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. + type: string + keyID: + description: keyID is the KMS key ID to use for encryption. Optional, buckets are encrypted by default on GCP. This allows for the use of a custom encryption key. + type: string + projectID: + description: projectID is the Project ID of the GCP project that this bucket should be associated with. + type: string + region: + description: region is the GCS location in which your bucket exists. Optional, will be set based on the installed GCS Region. + type: string + type: object + ibmcos: + description: ibmcos represents configuration that uses IBM Cloud Object Storage. + properties: + bucket: + description: bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. + type: string + location: + description: location is the IBM Cloud location in which your bucket exists. Optional, will be set based on the installed IBM Cloud location. 
+ type: string + resourceGroupName: + description: resourceGroupName is the name of the IBM Cloud resource group that this bucket and its service instance is associated with. Optional, will be set based on the installed IBM Cloud resource group. + type: string + resourceKeyCRN: + description: resourceKeyCRN is the CRN of the IBM Cloud resource key that is created for the service instance. Commonly referred as a service credential and must contain HMAC type credentials. Optional, will be computed if not provided. + pattern: ^crn:.+:.+:.+:cloud-object-storage:.+:.+:.+:resource-key:.+$ + type: string + serviceInstanceCRN: + description: serviceInstanceCRN is the CRN of the IBM Cloud Object Storage service instance that this bucket is associated with. Optional, will be computed if not provided. + pattern: ^crn:.+:.+:.+:cloud-object-storage:.+:.+:.+::$ + type: string + type: object + managementState: + description: managementState indicates if the operator manages the underlying storage unit. If Managed the operator will remove the storage when this operator gets Removed. + pattern: ^(Managed|Unmanaged)$ + type: string + oss: + description: Oss represents configuration that uses Alibaba Cloud Object Storage Service. + properties: + bucket: + description: Bucket is the bucket name in which you want to store the registry's data. About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm) Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be autogenerated in the form of -image-registry-- + maxLength: 63 + minLength: 3 + pattern: ^[0-9a-z]+(-[0-9a-z]+)*$ + type: string + encryption: + description: Encryption specifies whether you would like your data encrypted on the server side. 
More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm) + properties: + kms: + description: KMS (key management service) is an encryption type that holds the struct for KMS KeyID + properties: + keyID: + description: KeyID holds the KMS encryption key ID + minLength: 1 + type: string + required: + - keyID + type: object + method: + default: AES256 + description: Method defines the different encrytion modes available Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `AES256`. + enum: + - KMS + - AES256 + type: string + type: object + endpointAccessibility: + default: Internal + description: EndpointAccessibility specifies whether the registry use the OSS VPC internal endpoint Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `Internal`. + enum: + - Internal + - Public + - '' + type: string + region: + description: Region is the Alibaba Cloud Region in which your bucket exists. For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html). Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be based on the installed Alibaba Cloud Region. + type: string + type: object + pvc: + description: pvc represents configuration that uses a PersistentVolumeClaim. + properties: + claim: + description: claim defines the Persisent Volume Claim's name to be used. + type: string + type: object + s3: + description: s3 represents configuration that uses Amazon Simple Storage Service. + properties: + bucket: + description: bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. 
+ type: string + cloudFront: + description: cloudFront configures Amazon Cloudfront as the storage middleware in a registry. + properties: + baseURL: + description: baseURL contains the SCHEME://HOST[/PATH] at which Cloudfront is served. + type: string + duration: + description: duration is the duration of the Cloudfront session. + format: duration + type: string + keypairID: + description: keypairID is key pair ID provided by AWS. + type: string + privateKey: + description: privateKey points to secret containing the private key, provided by AWS. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - baseURL + - keypairID + - privateKey + type: object + encrypt: + description: encrypt specifies whether the registry stores the image in encrypted format or not. Optional, defaults to false. + type: boolean + keyID: + description: keyID is the KMS key ID to use for encryption. Optional, Encrypt must be true, or this parameter is ignored. + type: string + region: + description: region is the AWS region in which your bucket exists. Optional, will be set based on the installed AWS Region. + type: string + regionEndpoint: + description: regionEndpoint is the endpoint for S3 compatible storage services. It should be a valid URL with scheme, e.g. https://s3.example.com. Optional, defaults based on the Region that is provided. + type: string + trustedCA: + description: "trustedCA is a reference to a config map containing a CA bundle. The image registry and its operator use certificates from this bundle to verify S3 server certificates. 
\n The namespace for the config map referenced by trustedCA is \"openshift-config\". The key for the bundle in the config map is \"ca-bundle.crt\"." + properties: + name: + description: name is the metadata.name of the referenced config map. This field must adhere to standard config map naming restrictions. The name must consist solely of alphanumeric characters, hyphens (-) and periods (.). It has a maximum length of 253 characters. If this field is not specified or is empty string, the default trust bundle will be used. + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + type: object + virtualHostedStyle: + description: virtualHostedStyle enables using S3 virtual hosted style bucket paths with a custom RegionEndpoint Optional, defaults to false. + type: boolean + type: object + swift: + description: swift represents configuration that uses OpenStack Object Storage. + properties: + authURL: + description: authURL defines the URL for obtaining an authentication token. + type: string + authVersion: + description: authVersion specifies the OpenStack Auth's version. + type: string + container: + description: container defines the name of Swift container where to store the registry's data. + type: string + domain: + description: domain specifies Openstack's domain name for Identity v3 API. + type: string + domainID: + description: domainID specifies Openstack's domain id for Identity v3 API. + type: string + regionName: + description: regionName defines Openstack's region in which container exists. + type: string + tenant: + description: tenant defines Openstack tenant name to be used by registry. + type: string + tenantID: + description: tenant defines Openstack tenant id to be used by registry. 
+ type: string + type: object + type: object + storageManaged: + description: storageManaged is deprecated, please refer to Storage.managementState + type: boolean + version: + description: version is the level this availability applies to + type: string + required: + - storage + - storageManaged + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/imageregistry.operator.openshift.io/v1/imagepruners.yaml b/crd-catalog/openshift/api/imageregistry.operator.openshift.io/v1/imagepruners.yaml new file mode 100644 index 000000000..d874b9dc5 --- /dev/null +++ b/crd-catalog/openshift/api/imageregistry.operator.openshift.io/v1/imagepruners.yaml @@ -0,0 +1,644 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/555 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: imagepruners.imageregistry.operator.openshift.io +spec: + group: imageregistry.operator.openshift.io + names: + kind: ImagePruner + listKind: ImagePrunerList + plural: imagepruners + singular: imagepruner + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ImagePruner is the configuration object for an image registry pruner managed by the registry operator. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ImagePrunerSpec defines the specs for the running image pruner. + properties: + affinity: + description: affinity is a group of node affinity scheduling rules for the image pruner pod. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. 
+ items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. 
+ properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". 
An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + failedJobsHistoryLimit: + description: failedJobsHistoryLimit specifies how many failed image pruner jobs to retain. Defaults to 3 if not set. + format: int32 + type: integer + ignoreInvalidImageReferences: + description: ignoreInvalidImageReferences indicates whether the pruner can ignore errors while parsing image references. 
+ type: boolean + keepTagRevisions: + description: keepTagRevisions specifies the number of image revisions for a tag in an image stream that will be preserved. Defaults to 3. + type: integer + keepYoungerThan: + description: 'keepYoungerThan specifies the minimum age in nanoseconds of an image and its referrers for it to be considered a candidate for pruning. DEPRECATED: This field is deprecated in favor of keepYoungerThanDuration. If both are set, this field is ignored and keepYoungerThanDuration takes precedence.' + format: int64 + type: integer + keepYoungerThanDuration: + description: keepYoungerThanDuration specifies the minimum age of an image and its referrers for it to be considered a candidate for pruning. Defaults to 60m (60 minutes). + format: duration + type: string + logLevel: + default: Normal + description: "logLevel sets the level of log output for the pruner job. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + nodeSelector: + additionalProperties: + type: string + description: nodeSelector defines the node selection constraints for the image pruner pod. + type: object + resources: + description: resources defines the resource requests and limits for the image pruner pod. + properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + schedule: + description: 'schedule specifies when to execute the job using standard cronjob syntax: https://wikipedia.org/wiki/Cron. Defaults to `0 0 * * *`.' + type: string + successfulJobsHistoryLimit: + description: successfulJobsHistoryLimit specifies how many successful image pruner jobs to retain. Defaults to 3 if not set. + format: int32 + type: integer + suspend: + description: suspend specifies whether or not to suspend subsequent executions of this cronjob. Defaults to false. + type: boolean + tolerations: + description: tolerations defines the node tolerations for the image pruner pod. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . 
+ properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + status: + description: ImagePrunerStatus reports image pruner operational status. + properties: + conditions: + description: conditions is a list of conditions and their status. + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change that has been applied. 
+ format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/ingress.operator.openshift.io/v1/dnsrecords.args b/crd-catalog/openshift/api/ingress.operator.openshift.io/v1/dnsrecords.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/ingress.operator.openshift.io/v1/dnsrecords.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/ingress.operator.openshift.io/v1/dnsrecords.yaml b/crd-catalog/openshift/api/ingress.operator.openshift.io/v1/dnsrecords.yaml new file mode 100644 index 000000000..1acc05c58 --- /dev/null +++ b/crd-catalog/openshift/api/ingress.operator.openshift.io/v1/dnsrecords.yaml @@ -0,0 +1,130 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/584 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: dnsrecords.ingress.operator.openshift.io +spec: + group: ingress.operator.openshift.io + names: + kind: DNSRecord + listKind: DNSRecordList + plural: dnsrecords + singular: dnsrecord + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "DNSRecord is a DNS record managed in the zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone. \n Cluster admin manipulation of this resource is not supported. This resource is only for internal communication of OpenShift operators. \n If DNSManagementPolicy is \"Unmanaged\", the operator will not be responsible for managing the DNS records on the cloud provider. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the dnsRecord. + properties: + dnsManagementPolicy: + default: Managed + description: "dnsManagementPolicy denotes the current policy applied on the DNS record. Records that have policy set as \"Unmanaged\" are ignored by the ingress operator. This means that the DNS record on the cloud provider is not managed by the operator, and the \"Published\" status condition will be updated to \"Unknown\" status, since it is externally managed. Any existing record on the cloud provider can be deleted at the discretion of the cluster admin. \n This field defaults to Managed. Valid values are \"Managed\" and \"Unmanaged\"." + enum: + - Managed + - Unmanaged + type: string + dnsName: + description: dnsName is the hostname of the DNS record + minLength: 1 + type: string + recordTTL: + description: recordTTL is the record TTL in seconds. If zero, the default is 30. RecordTTL will not be used in AWS regions Alias targets, but will be used in CNAME targets, per AWS API contract. + format: int64 + minimum: 0.0 + type: integer + recordType: + description: recordType is the DNS record type. For example, "A" or "CNAME". + enum: + - CNAME + - A + type: string + targets: + description: targets are record targets. 
+ items: + type: string + minItems: 1 + type: array + required: + - dnsManagementPolicy + - dnsName + - recordTTL + - recordType + - targets + type: object + status: + description: status is the most recently observed status of the dnsRecord. + properties: + observedGeneration: + description: observedGeneration is the most recently observed generation of the DNSRecord. When the DNSRecord is updated, the controller updates the corresponding record in each managed zone. If an update for a particular zone fails, that failure is recorded in the status condition for the zone so that the controller can determine that it needs to retry the update for that specific zone. + format: int64 + type: integer + zones: + description: zones are the status of the record in each zone. + items: + description: DNSZoneStatus is the status of a record within a specific zone. + properties: + conditions: + description: "conditions are any conditions associated with the record in the zone. \n If publishing the record succeeds, the \"Published\" condition will be set with status \"True\" and upon failure it will be set to \"False\" along with the reason and message describing the cause of the failure." + items: + description: DNSZoneCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + minLength: 1 + type: string + type: + minLength: 1 + type: string + required: + - status + - type + type: object + type: array + dnsZone: + description: dnsZone is the zone where the record is published. + properties: + id: + description: "id is the identifier that can be used to find the DNS hosted zone. \n on AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3]. 
\n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + additionalProperties: + type: string + description: "tags can be used to query the DNS hosted zone. \n on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + type: object + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] diff --git a/crd-catalog/openshift/api/insights.openshift.io/v1alpha1/datagathers.args b/crd-catalog/openshift/api/insights.openshift.io/v1alpha1/datagathers.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/insights.openshift.io/v1alpha1/datagathers.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/insights.openshift.io/v1alpha1/datagathers.yaml b/crd-catalog/openshift/api/insights.openshift.io/v1alpha1/datagathers.yaml new file mode 100644 index 000000000..3d13af662 --- /dev/null +++ b/crd-catalog/openshift/api/insights.openshift.io/v1alpha1/datagathers.yaml @@ -0,0 +1,323 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1365 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: datagathers.insights.openshift.io +spec: + group: insights.openshift.io 
+ names: + kind: DataGather + listKind: DataGatherList + plural: datagathers + singular: datagather + scope: Cluster + versions: + - additionalPrinterColumns: + - description: DataGather job state + jsonPath: .status.dataGatherState + name: State + type: string + - description: DataGather start time + jsonPath: .status.startTime + name: StartTime + type: date + - description: DataGather finish time + jsonPath: .status.finishTime + name: FinishTime + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: "DataGather provides data gather configuration options and status for the particular Insights data gathering. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + dataPolicy: + description: dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain in the Insights archive data. Valid values are "ClearText" and "ObfuscateNetworking". When set to ClearText the data is not obfuscated. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. 
When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is ClearText. + enum: + - '' + - ClearText + - ObfuscateNetworking + type: string + gatherers: + description: 'gatherers is a list of gatherers configurations. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: "oc get insightsoperators.operator.openshift.io cluster -o json | jq ''.status.gatherStatus.gatherers[].name''"' + items: + description: gathererConfig allows to configure specific gatherers + properties: + name: + description: name is the name of specific gatherer + type: string + state: + description: state allows you to configure specific gatherer. Valid values are "Enabled", "Disabled" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default. The current default is Enabled. + enum: + - '' + - Enabled + - Disabled + type: string + required: + - name + type: object + type: array + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + properties: + conditions: + description: conditions provide details on the status of the gatherer job. + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + dataGatherState: + description: dataGatherState reflects the current state of the data gathering process. + enum: + - Running + - Completed + - Failed + - Pending + type: string + x-kubernetes-validations: + - message: dataGatherState cannot transition from Running to Pending + rule: '!(oldSelf == ''Running'' && self == ''Pending'')' + - message: dataGatherState cannot transition from Completed to Pending + rule: '!(oldSelf == ''Completed'' && self == ''Pending'')' + - message: dataGatherState cannot transition from Failed to Pending + rule: '!(oldSelf == ''Failed'' && self == ''Pending'')' + - message: dataGatherState cannot transition from Completed to Running + rule: '!(oldSelf == ''Completed'' && self == ''Running'')' + - message: dataGatherState cannot transition from Failed to Running + rule: '!(oldSelf == ''Failed'' && self == ''Running'')' + finishTime: + description: finishTime is the time when Insights data gathering finished. + format: date-time + type: string + x-kubernetes-validations: + - message: finishTime is immutable once set + rule: self == oldSelf + gatherers: + description: gatherers is a list of active gatherers (and their statuses) in the last gathering. + items: + description: gathererStatus represents information about a particular data gatherer. + properties: + conditions: + description: conditions provide details on the status of each gatherer. 
+ items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + lastGatherDuration: + description: lastGatherDuration represents the time spent gathering. + pattern: ^([1-9][0-9]*(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + name: + description: name is the name of the gatherer. + maxLength: 256 + minLength: 5 + type: string + required: + - conditions + - lastGatherDuration + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + insightsReport: + description: insightsReport provides general Insights analysis results. When omitted, this means no data gathering has taken place yet or the corresponding Insights analysis (identified by "insightsRequestID") is not available. + properties: + downloadedAt: + description: downloadedAt is the time when the last Insights report was downloaded. An empty value means that there has not been any Insights report downloaded yet and it usually appears in disconnected clusters (or clusters when the Insights data gathering is disabled). 
+ format: date-time + type: string + healthChecks: + description: healthChecks provides basic information about active Insights health checks in a cluster. + items: + description: healthCheck represents an Insights health check attributes. + properties: + advisorURI: + description: advisorURI provides the URL link to the Insights Advisor. + pattern: ^https:\/\/\S+ + type: string + description: + description: description provides basic description of the healtcheck. + maxLength: 2048 + minLength: 10 + type: string + state: + description: state determines what the current state of the health check is. Health check is enabled by default and can be disabled by the user in the Insights advisor user interface. + enum: + - Enabled + - Disabled + type: string + totalRisk: + description: totalRisk of the healthcheck. Indicator of the total risk posed by the detected issue; combination of impact and likelihood. The values can be from 1 to 4, and the higher the number, the more important the issue. + format: int32 + maximum: 4.0 + minimum: 1.0 + type: integer + required: + - advisorURI + - description + - state + - totalRisk + type: object + type: array + x-kubernetes-list-type: atomic + uri: + description: uri provides the URL link from which the report was downloaded. + pattern: ^https:\/\/\S+ + type: string + type: object + insightsRequestID: + description: insightsRequestID is an Insights request ID to track the status of the Insights analysis (in console.redhat.com processing pipeline) for the corresponding Insights data archive. + type: string + x-kubernetes-validations: + - message: insightsRequestID is immutable once set + rule: self == oldSelf + relatedObjects: + description: relatedObjects is a list of resources which are useful when debugging or inspecting the data gathering Pod + items: + description: ObjectReference contains enough information to let you inspect or modify the referred object. 
+ properties: + group: + description: 'group is the API Group of the Resource. Enter empty string for the core group. This value should consist of only lowercase alphanumeric characters, hyphens and periods. Example: "", "apps", "build.openshift.io", etc.' + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: 'resource is the type that is being referenced. It is normally the plural form of the resource kind in lowercase. This value should consist of only lowercase alphanumeric characters and hyphens. Example: "deployments", "deploymentconfigs", "pods", etc.' + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + required: + - group + - name + - resource + type: object + type: array + startTime: + description: startTime is the time when Insights data gathering started. + format: date-time + type: string + x-kubernetes-validations: + - message: startTime is immutable once set + rule: self == oldSelf + type: object + x-kubernetes-validations: + - message: cannot remove insightsRequestID attribute from status + rule: (!has(oldSelf.insightsRequestID) || has(self.insightsRequestID)) + - message: cannot remove startTime attribute from status + rule: (!has(oldSelf.startTime) || has(self.startTime)) + - message: cannot remove finishTime attribute from status + rule: (!has(oldSelf.finishTime) || has(self.finishTime)) + - message: cannot remove dataGatherState attribute from status + rule: (!has(oldSelf.dataGatherState) || has(self.dataGatherState)) + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/machine.openshift.io/v1/controlplanemachinesets.args b/crd-catalog/openshift/api/machine.openshift.io/v1/controlplanemachinesets.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ 
b/crd-catalog/openshift/api/machine.openshift.io/v1/controlplanemachinesets.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/machine.openshift.io/v1/controlplanemachinesets.yaml b/crd-catalog/openshift/api/machine.openshift.io/v1/controlplanemachinesets.yaml new file mode 100644 index 000000000..d70df559e --- /dev/null +++ b/crd-catalog/openshift/api/machine.openshift.io/v1/controlplanemachinesets.yaml @@ -0,0 +1,589 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1112 + capability.openshift.io/name: MachineAPI + exclude.release.openshift.io/internal-openshift-hosted: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + release.openshift.io/feature-set: Default + name: controlplanemachinesets.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: ControlPlaneMachineSet + listKind: ControlPlaneMachineSetList + plural: controlplanemachinesets + singular: controlplanemachineset + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Desired Replicas + jsonPath: .spec.replicas + name: Desired + type: integer + - description: Current Replicas + jsonPath: .status.replicas + name: Current + type: integer + - description: Ready Replicas + jsonPath: .status.readyReplicas + name: Ready + type: integer + - description: Updated Replicas + jsonPath: .status.updatedReplicas + name: Updated + type: integer + - description: Observed number of unavailable replicas + jsonPath: .status.unavailableReplicas + name: Unavailable + type: integer + - description: ControlPlaneMachineSet state + jsonPath: .spec.state + name: State + type: string + - description: ControlPlaneMachineSet age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: 'ControlPlaneMachineSet ensures that a specified number of control plane 
machine replicas are running at any given time. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet. + properties: + replicas: + default: 3 + description: Replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes, 3 and 5 are the only valid values for this field. + enum: + - 3 + - 5 + format: int32 + type: integer + x-kubernetes-validations: + - message: replicas is immutable + rule: self == oldSelf + selector: + description: Label selector for Machines. Existing Machines selected by this selector will be the ones affected by this ControlPlaneMachineSet. It must match the template's labels. This field is considered immutable after creation of the resource. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: selector is immutable + rule: self == oldSelf + state: + default: Inactive + description: State defines whether the ControlPlaneMachineSet is Active or Inactive. When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent further action please remove the ControlPlaneMachineSet. 
+ enum: + - Active + - Inactive + type: string + x-kubernetes-validations: + - message: state cannot be changed once Active + rule: oldSelf != 'Active' || self == oldSelf + strategy: + default: + type: RollingUpdate + description: Strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec. + properties: + type: + default: RollingUpdate + description: Type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are "RollingUpdate" and "OnDelete". The current default value is "RollingUpdate". + enum: + - RollingUpdate + - OnDelete + type: string + type: object + template: + description: Template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet. + properties: + machineType: + description: MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. Currently, the only valid value is machines_v1beta1_machine_openshift_io. + enum: + - machines_v1beta1_machine_openshift_io + type: string + machines_v1beta1_machine_openshift_io: + description: OpenShiftMachineV1Beta1Machine defines the template for creating Machines from the v1beta1.machine.openshift.io API group. + properties: + failureDomains: + description: FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information. + properties: + aws: + description: AWS configures failure domain information for the AWS platform. + items: + description: AWSFailureDomain configures failure domain information for the AWS platform. + minProperties: 1 + properties: + placement: + description: Placement configures the placement information for this instance. 
+ properties: + availabilityZone: + description: AvailabilityZone is the availability zone of the instance. + type: string + required: + - availabilityZone + type: object + subnet: + description: Subnet is a reference to the subnet to use for this instance. + properties: + arn: + description: ARN of resource. + type: string + filters: + description: Filters is a set of filters used to identify a resource. + items: + description: AWSResourceFilter is a filter used to identify an AWS resource + properties: + name: + description: Name of the filter. Filter names are case-sensitive. + type: string + values: + description: Values includes one or more filter values. Filter values are case-sensitive. + items: + type: string + type: array + required: + - name + type: object + type: array + id: + description: ID of resource. + type: string + type: + description: Type determines how the reference will fetch the AWS resource. + enum: + - ID + - ARN + - Filters + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: id is required when type is ID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''ID'' ? has(self.id) : !has(self.id)' + - message: arn is required when type is ARN, and forbidden otherwise + rule: 'has(self.type) && self.type == ''ARN'' ? has(self.arn) : !has(self.arn)' + - message: filters is required when type is Filters, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Filters'' ? has(self.filters) : !has(self.filters)' + type: object + type: array + azure: + description: Azure configures failure domain information for the Azure platform. + items: + description: AzureFailureDomain configures failure domain information for the Azure platform. + properties: + subnet: + description: subnet is the name of the network subnet in which the VM will be created. When omitted, the subnet value from the machine providerSpec template will be used. 
+ maxLength: 80 + pattern: ^[a-zA-Z0-9](?:[a-zA-Z0-9._-]*[a-zA-Z0-9_])?$ + type: string + zone: + description: Availability Zone for the virtual machine. If nil, the virtual machine should be deployed to no zone. + type: string + required: + - zone + type: object + type: array + gcp: + description: GCP configures failure domain information for the GCP platform. + items: + description: GCPFailureDomain configures failure domain information for the GCP platform + properties: + zone: + description: Zone is the zone in which the GCP machine provider will create the VM. + type: string + required: + - zone + type: object + type: array + nutanix: + description: nutanix configures failure domain information for the Nutanix platform. + items: + description: NutanixFailureDomainReference refers to the failure domain of the Nutanix platform. + properties: + name: + description: name of the failure domain in which the nutanix machine provider will create the VM. Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. + maxLength: 64 + minLength: 1 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + openstack: + description: OpenStack configures failure domain information for the OpenStack platform. + items: + description: OpenStackFailureDomain configures failure domain information for the OpenStack platform. + minProperties: 1 + properties: + availabilityZone: + description: 'availabilityZone is the nova availability zone in which the OpenStack machine provider will create the VM. If not specified, the VM will be created in the default availability zone specified in the nova configuration. Availability zone names must NOT contain : since it is used by admin users to specify hosts where instances are launched in server creation. 
Also, it must not contain spaces otherwise it will lead to node that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits.' + maxLength: 63 + minLength: 1 + pattern: '^[^: ]*$' + type: string + rootVolume: + description: rootVolume contains settings that will be used by the OpenStack machine provider to create the root volume attached to the VM. If not specified, no root volume will be created. + properties: + availabilityZone: + description: availabilityZone specifies the Cinder availability zone where the root volume will be created. If not specifified, the root volume will be created in the availability zone specified by the volume type in the cinder configuration. If the volume type (configured in the OpenStack cluster) does not specify an availability zone, the root volume will be created in the default availability zone specified in the cinder configuration. See https://docs.openstack.org/cinder/latest/admin/availability-zone-type.html for more details. If the OpenStack cluster is deployed with the cross_az_attach configuration option set to false, the root volume will have to be in the same availability zone as the VM (defined by OpenStackFailureDomain.AvailabilityZone). Availability zone names must NOT contain spaces otherwise it will lead to volume that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits. + maxLength: 63 + minLength: 1 + pattern: ^[^ ]*$ + type: string + volumeType: + description: volumeType specifies the type of the root volume that will be provisioned. The maximum length of a volume type name is 255 characters, as per the OpenStack limit. 
+ maxLength: 255 + minLength: 1 + type: string + required: + - volumeType + type: object + type: object + x-kubernetes-validations: + - message: rootVolume.availabilityZone is required when availabilityZone is set + rule: '!has(self.availabilityZone) || !has(self.rootVolume) || has(self.rootVolume.availabilityZone)' + type: array + platform: + description: Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix. + enum: + - '' + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + required: + - platform + type: object + x-kubernetes-validations: + - message: aws configuration is required when platform is AWS, and forbidden otherwise + rule: 'has(self.platform) && self.platform == ''AWS'' ? has(self.aws) : !has(self.aws)' + - message: azure configuration is required when platform is Azure, and forbidden otherwise + rule: 'has(self.platform) && self.platform == ''Azure'' ? has(self.azure) : !has(self.azure)' + - message: gcp configuration is required when platform is GCP, and forbidden otherwise + rule: 'has(self.platform) && self.platform == ''GCP'' ? has(self.gcp) : !has(self.gcp)' + - message: openstack configuration is required when platform is OpenStack, and forbidden otherwise + rule: 'has(self.platform) && self.platform == ''OpenStack'' ? has(self.openstack) : !has(self.openstack)' + - message: nutanix configuration is required when platform is Nutanix, and forbidden otherwise + rule: 'has(self.platform) && self.platform == ''Nutanix'' ? 
has(self.nutanix) : !has(self.nutanix)' + metadata: + description: 'ObjectMeta is the standard object metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector.' + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. This field must contain both the ''machine.openshift.io/cluster-api-machine-role'' and ''machine.openshift.io/cluster-api-machine-type'' labels, both with a value of ''master''. It must also contain a label with the key ''machine.openshift.io/cluster-api-cluster''.' 
+ type: object + x-kubernetes-validations: + - message: label 'machine.openshift.io/cluster-api-machine-role' is required, and must have value 'master' + rule: '''machine.openshift.io/cluster-api-machine-role'' in self && self[''machine.openshift.io/cluster-api-machine-role''] == ''master''' + - message: label 'machine.openshift.io/cluster-api-machine-type' is required, and must have value 'master' + rule: '''machine.openshift.io/cluster-api-machine-type'' in self && self[''machine.openshift.io/cluster-api-machine-type''] == ''master''' + - message: label 'machine.openshift.io/cluster-api-cluster' is required + rule: '''machine.openshift.io/cluster-api-cluster'' in self' + required: + - labels + type: object + spec: + description: Spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field. + properties: + lifecycleHooks: + description: LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + properties: + preDrain: + description: PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. + items: + description: LifecycleHook represents a single instance of a lifecycle hook + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. 
+ maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + maxLength: 512 + minLength: 3 + type: string + required: + - name + - owner + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + preTerminate: + description: PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained. + items: + description: LifecycleHook represents a single instance of a lifecycle hook + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + maxLength: 512 + minLength: 3 + type: string + required: + - name + - owner + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + metadata: + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. 
+ properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + type: object + providerID: + description: ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to use during node creation. + properties: + value: + description: Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. 
+ type: object + x-kubernetes-preserve-unknown-fields: true + type: object + taints: + description: The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + format: date-time + type: string + value: + description: The taint value corresponding to the taint key. + type: string + required: + - effect + - key + type: object + type: array + type: object + required: + - metadata + - spec + type: object + required: + - machineType + type: object + x-kubernetes-validations: + - message: machines_v1beta1_machine_openshift_io configuration is required when machineType is machines_v1beta1_machine_openshift_io, and forbidden otherwise + rule: 'has(self.machineType) && self.machineType == ''machines_v1beta1_machine_openshift_io'' ? has(self.machines_v1beta1_machine_openshift_io) : !has(self.machines_v1beta1_machine_openshift_io)' + required: + - replicas + - selector + - template + type: object + status: + description: ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD. 
+ properties: + conditions: + description: 'Conditions represents the observations of the ControlPlaneMachineSet''s current state. Known .status.conditions.type are: Available, Degraded and Progressing.' + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. 
The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: ObservedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, which is updated on mutation by the API Server. + format: int64 + type: integer + readyReplicas: + description: ReadyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. Note that this value may be higher than the desired number of replicas while rolling updates are in-progress. + format: int32 + type: integer + replicas: + description: Replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. Note that during update operations this value may differ from the desired replica count. + format: int32 + type: integer + unavailableReplicas: + description: UnavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity. 
When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas. + format: int32 + type: integer + updatedReplicas: + description: UpdatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready. + format: int32 + type: integer + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] diff --git a/crd-catalog/openshift/api/machine.openshift.io/v1beta1/machinehealthchecks.yaml b/crd-catalog/openshift/api/machine.openshift.io/v1beta1/machinehealthchecks.yaml new file mode 100644 index 000000000..cada51647 --- /dev/null +++ b/crd-catalog/openshift/api/machine.openshift.io/v1beta1/machinehealthchecks.yaml @@ -0,0 +1,194 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1032 + capability.openshift.io/name: MachineAPI + exclude.release.openshift.io/internal-openshift-hosted: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: machinehealthchecks.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: MachineHealthCheck + listKind: MachineHealthCheckList + plural: machinehealthchecks + shortNames: + - mhc + - mhcs + singular: machinehealthcheck + 
scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Maximum number of unhealthy machines allowed + jsonPath: .spec.maxUnhealthy + name: MaxUnhealthy + type: string + - description: Number of machines currently monitored + jsonPath: .status.expectedMachines + name: ExpectedMachines + type: integer + - description: Current observed healthy machines + jsonPath: .status.currentHealthy + name: CurrentHealthy + type: integer + name: v1beta1 + schema: + openAPIV3Schema: + description: 'MachineHealthCheck is the Schema for the machinehealthchecks API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of machine health check policy + properties: + maxUnhealthy: + anyOf: + - type: integer + - type: string + default: 100% + description: Any farther remediation is only allowed if at most "MaxUnhealthy" machines selected by "selector" are not healthy. Expects either a postive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. Both 0 and 0% are valid and will block all remediation. 
+ pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + nodeStartupTimeout: + default: 10m + description: Machines older than this duration without a node will be considered to have failed and will be remediated. To prevent Machines without Nodes from being removed, disable startup checks by setting this value explicitly to "0". Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + pattern: ^0|([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + remediationTemplate: + description: "RemediationTemplate is a reference to a remediation template provided by an infrastructure provider. \n This field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator." + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + selector: + description: 'Label selector to match machines whose health will be exercised. Note: An empty selector will match all machines.' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + unhealthyConditions: + description: UnhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy. + items: + description: UnhealthyCondition represents a Node condition type and value with a timeout specified as a duration. When the named condition has been in the given status for at least the timeout value, a node is considered unhealthy. + properties: + status: + minLength: 1 + type: string + timeout: + description: Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + type: + minLength: 1 + type: string + type: object + minItems: 1 + type: array + type: object + status: + description: Most recently observed status of MachineHealthCheck resource + properties: + conditions: + description: Conditions defines the current state of the MachineHealthCheck + items: + description: Condition defines an observation of a Machine API resource operational state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about the transition. This field may be empty. 
+ type: string + reason: + description: The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + type: string + severity: + description: Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + type: string + type: object + type: array + currentHealthy: + description: total number of machines counted by this machine health check + minimum: 0.0 + type: integer + expectedMachines: + description: total number of machines counted by this machine health check + minimum: 0.0 + type: integer + remediationsAllowed: + description: RemediationsAllowed is the number of further remediations allowed by this machine health check before maxUnhealthy short circuiting will be applied + format: int32 + minimum: 0.0 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] diff --git a/crd-catalog/openshift/api/machine.openshift.io/v1beta1/machines.yaml b/crd-catalog/openshift/api/machine.openshift.io/v1beta1/machines.yaml new file mode 100644 index 000000000..93f9cb0de --- /dev/null +++ b/crd-catalog/openshift/api/machine.openshift.io/v1beta1/machines.yaml @@ -0,0 +1,329 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: 
https://github.com/openshift/api/pull/948 + capability.openshift.io/name: MachineAPI + exclude.release.openshift.io/internal-openshift-hosted: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: machines.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: Machine + listKind: MachineList + plural: machines + singular: machine + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Phase of machine + jsonPath: .status.phase + name: Phase + type: string + - description: Type of instance + jsonPath: .metadata.labels['machine\.openshift\.io/instance-type'] + name: Type + type: string + - description: Region associated with machine + jsonPath: .metadata.labels['machine\.openshift\.io/region'] + name: Region + type: string + - description: Zone associated with machine + jsonPath: .metadata.labels['machine\.openshift\.io/zone'] + name: Zone + type: string + - description: Machine age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Node associated with machine + jsonPath: .status.nodeRef.name + name: Node + priority: 1 + type: string + - description: Provider ID of machine created in cloud provider + jsonPath: .spec.providerID + name: ProviderID + priority: 1 + type: string + - description: State of instance + jsonPath: .metadata.annotations['machine\.openshift\.io/instance-state'] + name: State + priority: 1 + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: 'Machine is the Schema for the machines API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MachineSpec defines the desired state of Machine + properties: + lifecycleHooks: + description: LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + properties: + preDrain: + description: PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. + items: + description: LifecycleHook represents a single instance of a lifecycle hook + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + maxLength: 512 + minLength: 3 + type: string + required: + - name + - owner + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + preTerminate: + description: PreTerminate hooks prevent the machine from being terminated. 
PreTerminate hooks be actioned after the Machine has been drained. + items: + description: LifecycleHook represents a single instance of a lifecycle hook + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + maxLength: 512 + minLength: 3 + type: string + required: + - name + - owner + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + metadata: + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + x-kubernetes-map-type: atomic + type: array + type: object + providerID: + description: ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. 
This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to use during node creation. + properties: + value: + description: Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + taints: + description: The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. 
+ type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + format: date-time + type: string + value: + description: The taint value corresponding to the taint key. + type: string + required: + - effect + - key + type: object + type: array + type: object + status: + description: MachineStatus defines the observed state of Machine + properties: + addresses: + description: Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available. + items: + description: NodeAddress contains information for the node's address. + properties: + address: + description: The node address. + type: string + type: + description: Node address type, one of Hostname, ExternalIP or InternalIP. + type: string + required: + - address + - type + type: object + type: array + conditions: + description: Conditions defines the current state of the Machine + items: + description: Condition defines an observation of a Machine API resource operational state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + type: string + severity: + description: Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. 
The Severity field MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + type: string + type: object + type: array + errorMessage: + description: "ErrorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output." + type: string + errorReason: + description: "ErrorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. 
Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output." + type: string + lastOperation: + description: LastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully. + properties: + description: + description: Description is the human-readable description of the last operation. + type: string + lastUpdated: + description: LastUpdated is the timestamp at which LastOperation API was last-updated. + format: date-time + type: string + state: + description: State is the current status of the last performed operation. E.g. Processing, Failed, Successful etc + type: string + type: + description: Type is the type of operation which was last performed. E.g. Create, Delete, Update etc + type: string + type: object + lastUpdated: + description: LastUpdated identifies when this status was last observed. + format: date-time + type: string + nodeRef: + description: NodeRef will point to the corresponding Node if it exists. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + phase: + description: 'Phase represents the current phase of machine actuation. One of: Failed, Provisioning, Provisioned, Running, Deleting' + type: string + providerStatus: + description: ProviderStatus details a Provider-specific status. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field. 
+ type: object + x-kubernetes-preserve-unknown-fields: true + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] diff --git a/crd-catalog/openshift/api/machine.openshift.io/v1beta1/machinesets.yaml b/crd-catalog/openshift/api/machine.openshift.io/v1beta1/machinesets.yaml new file mode 100644 index 000000000..86cb47918 --- /dev/null +++ b/crd-catalog/openshift/api/machine.openshift.io/v1beta1/machinesets.yaml @@ -0,0 +1,350 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1032 + capability.openshift.io/name: MachineAPI + exclude.release.openshift.io/internal-openshift-hosted: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: machinesets.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: MachineSet + listKind: MachineSetList + plural: machinesets + singular: machineset + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Desired Replicas + jsonPath: .spec.replicas + name: Desired + type: integer + - description: Current Replicas + jsonPath: .status.replicas + name: Current + type: integer + - description: Ready Replicas + jsonPath: .status.readyReplicas + name: Ready + type: integer + - description: Observed number of available replicas + jsonPath: .status.availableReplicas + name: Available + type: string + - description: Machineset age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: 'MachineSet ensures that a specified number of machines replicas are running at any given time. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).' 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MachineSetSpec defines the desired state of MachineSet + properties: + deletePolicy: + description: DeletePolicy defines the policy used to identify nodes to delete when downscaling. Defaults to "Random". Valid values are "Random, "Newest", "Oldest" + enum: + - Random + - Newest + - Oldest + type: string + minReadySeconds: + description: MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready) + format: int32 + type: integer + replicas: + default: 1 + description: Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. + format: int32 + type: integer + selector: + description: 'Selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template''s labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + template: + description: Template is the object that describes the machine that will be created if insufficient replicas are detected. + properties: + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. 
Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + x-kubernetes-map-type: atomic + type: array + type: object + spec: + description: 'Specification of the desired behavior of the machine. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + lifecycleHooks: + description: LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + properties: + preDrain: + description: PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. + items: + description: LifecycleHook represents a single instance of a lifecycle hook + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + maxLength: 512 + minLength: 3 + type: string + required: + - name + - owner + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + preTerminate: + description: PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained. 
+ items: + description: LifecycleHook represents a single instance of a lifecycle hook + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + maxLength: 512 + minLength: 3 + type: string + required: + - name + - owner + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + metadata: + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + x-kubernetes-map-type: atomic + type: array + type: object + providerID: + description: ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. 
This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to use during node creation. + properties: + value: + description: Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + taints: + description: The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. 
+ type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + format: date-time + type: string + value: + description: The taint value corresponding to the taint key. + type: string + required: + - effect + - key + type: object + type: array + type: object + type: object + type: object + status: + description: MachineSetStatus defines the observed state of MachineSet + properties: + availableReplicas: + description: The number of available replicas (ready for at least minReadySeconds) for this MachineSet. + format: int32 + type: integer + errorMessage: + type: string + errorReason: + description: "In the event that there is a terminal problem reconciling the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason will be populated with a succinct value suitable for machine interpretation, while ErrorMessage will contain a more verbose string suitable for logging and human consumption. \n These fields should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachineTemplate's spec or the configuration of the machine controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the machine controller, or the responsible machine controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the MachineSet object and/or logged in the controller's output." + type: string + fullyLabeledReplicas: + description: The number of replicas that have labels matching the labels of the machine template of the MachineSet. 
+ format: int32 + type: integer + observedGeneration: + description: ObservedGeneration reflects the generation of the most recently observed MachineSet. + format: int64 + type: integer + readyReplicas: + description: The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is "Ready". + format: int32 + type: integer + replicas: + description: Replicas is the most recently observed number of replicas. + format: int32 + type: integer + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] diff --git a/crd-catalog/openshift/api/monitoring.openshift.io/v1/alertingrules.yaml b/crd-catalog/openshift/api/monitoring.openshift.io/v1/alertingrules.yaml new file mode 100644 index 000000000..00b449790 --- /dev/null +++ b/crd-catalog/openshift/api/monitoring.openshift.io/v1/alertingrules.yaml @@ -0,0 +1,128 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1406 + description: OpenShift Monitoring alerting rules + name: alertingrules.monitoring.openshift.io +spec: + group: monitoring.openshift.io + names: + kind: AlertingRule + listKind: AlertingRuleList + plural: alertingrules + singular: alertingrule + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "AlertingRule represents a set of user-defined Prometheus rule groups containing alerting rules. This resource is the supported method for cluster admins to create alerts based on metrics recorded by the platform monitoring stack in OpenShift, i.e. the Prometheus instance deployed to the openshift-monitoring namespace. 
You might use this to create custom alerting rules not shipped with OpenShift based on metrics from components such as the node_exporter, which provides machine-level metrics such as CPU usage, or kube-state-metrics, which provides metrics on Kubernetes usage. \n The API is mostly compatible with the upstream PrometheusRule type from the prometheus-operator. The primary difference being that recording rules are not allowed here -- only alerting rules. For each AlertingRule resource created, a corresponding PrometheusRule will be created in the openshift-monitoring namespace. OpenShift requires admins to use the AlertingRule resource rather than the upstream type in order to allow better OpenShift specific defaulting and validation, while not modifying the upstream APIs directly. \n You can find upstream API documentation for PrometheusRule resources here: \n https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the desired state of this AlertingRule object. + properties: + groups: + description: "groups is a list of grouped alerting rules. 
Rule groups are the unit at which Prometheus parallelizes rule processing. All rules in a single group share a configured evaluation interval. All rules in the group will be processed together on this interval, sequentially, and all rules will be processed. \n It's common to group related alerting rules into a single AlertingRule resources, and within that resource, closely related alerts, or simply alerts with the same interval, into individual groups. You are also free to create AlertingRule resources with only a single rule group, but be aware that this can have a performance impact on Prometheus if the group is extremely large or has very complex query expressions to evaluate. Spreading very complex rules across multiple groups to allow them to be processed in parallel is also a common use-case." + items: + description: RuleGroup is a list of sequentially evaluated alerting rules. + properties: + interval: + description: 'interval is how often rules in the group are evaluated. If not specified, it defaults to the global.evaluation_interval configured in Prometheus, which itself defaults to 30 seconds. You can check if this value has been modified from the default on your cluster by inspecting the platform Prometheus configuration: The relevant field in that resource is: spec.evaluationInterval' + maxLength: 2048 + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + name: + description: name is the name of the group. + maxLength: 2048 + minLength: 1 + type: string + rules: + description: rules is a list of sequentially evaluated alerting rules. Prometheus may process rule groups in parallel, but rules within a single group are always processed sequentially, and all rules are processed. + items: + description: 'Rule describes an alerting rule. 
See Prometheus documentation: - https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules' + properties: + alert: + description: alert is the name of the alert. Must be a valid label value, i.e. may contain any Unicode character. + maxLength: 2048 + minLength: 1 + type: string + annotations: + additionalProperties: + type: string + description: annotations to add to each alert. These are values that can be used to store longer additional information that you won't query on, such as alert descriptions or runbook links. + type: object + expr: + anyOf: + - type: integer + - type: string + description: 'expr is the PromQL expression to evaluate. Every evaluation cycle this is evaluated at the current time, and all resultant time series become pending or firing alerts. This is most often a string representing a PromQL expression, e.g.: mapi_current_pending_csr > mapi_max_pending_csr In rare cases this could be a simple integer, e.g. a simple "1" if the intent is to create an alert that is always firing. This is sometimes used to create an always-firing "Watchdog" alert in order to ensure the alerting pipeline is functional.' + x-kubernetes-int-or-string: true + for: + description: for is the time period after which alerts are considered firing after first returning results. Alerts which have not yet fired for long enough are considered pending. + maxLength: 2048 + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + labels: + additionalProperties: + type: string + description: 'labels to add or overwrite for each alert. The results of the PromQL expression for the alert will result in an existing set of labels for the alert, after evaluating the expression, for any label specified here with the same name as a label in that set, the label here wins and overwrites the previous value. These should typically be short identifying values that may be useful to query against. 
A common example is the alert severity, where one sets `severity: warning` under the `labels` key:' + type: object + required: + - alert + - expr + type: object + minItems: 1 + type: array + required: + - name + - rules + type: object + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - groups + type: object + status: + description: status describes the current state of this AlertOverrides object. + properties: + observedGeneration: + description: observedGeneration is the last generation change you've dealt with. + format: int64 + type: integer + prometheusRule: + description: prometheusRule is the generated PrometheusRule for this AlertingRule. Each AlertingRule instance results in a generated PrometheusRule object in the same namespace, which is always the openshift-monitoring namespace. + properties: + name: + description: name of the referenced PrometheusRule. + maxLength: 2048 + minLength: 1 + type: string + required: + - name + type: object + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] diff --git a/crd-catalog/openshift/api/monitoring.openshift.io/v1/alertrelabelconfigs.args b/crd-catalog/openshift/api/monitoring.openshift.io/v1/alertrelabelconfigs.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/monitoring.openshift.io/v1/alertrelabelconfigs.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/monitoring.openshift.io/v1/alertrelabelconfigs.yaml b/crd-catalog/openshift/api/monitoring.openshift.io/v1/alertrelabelconfigs.yaml new file mode 100644 index 000000000..fbb9f4e40 --- /dev/null +++ b/crd-catalog/openshift/api/monitoring.openshift.io/v1/alertrelabelconfigs.yaml @@ -0,0 +1,166 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + 
annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1406 + description: OpenShift Monitoring alert relabel configurations + name: alertrelabelconfigs.monitoring.openshift.io +spec: + group: monitoring.openshift.io + names: + kind: AlertRelabelConfig + listKind: AlertRelabelConfigList + plural: alertrelabelconfigs + singular: alertrelabelconfig + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "AlertRelabelConfig defines a set of relabel configs for alerts. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the desired state of this AlertRelabelConfig object. + properties: + configs: + description: configs is a list of sequentially evaluated alert relabel configs. + items: + description: 'RelabelConfig allows dynamic rewriting of label sets for alerts. See Prometheus documentation: - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + properties: + action: + default: Replace + description: 'action to perform based on regex matching. 
Must be one of: ''Replace'', ''Keep'', ''Drop'', ''HashMod'', ''LabelMap'', ''LabelDrop'', or ''LabelKeep''. Default is: ''Replace''' + enum: + - Replace + - Keep + - Drop + - HashMod + - LabelMap + - LabelDrop + - LabelKeep + type: string + modulus: + description: modulus to take of the hash of the source label values. This can be combined with the 'HashMod' action to set 'target_label' to the 'modulus' of a hash of the concatenated 'source_labels'. This is only valid if sourceLabels is not empty and action is not 'LabelKeep' or 'LabelDrop'. + format: int64 + type: integer + regex: + default: (.*) + description: 'regex against which the extracted value is matched. Default is: ''(.*)'' regex is required for all actions except ''HashMod''' + maxLength: 2048 + type: string + replacement: + description: 'replacement value against which a regex replace is performed if the regular expression matches. This is required if the action is ''Replace'' or ''LabelMap'' and forbidden for actions ''LabelKeep'' and ''LabelDrop''. Regex capture groups are available. Default is: ''$1''' + maxLength: 2048 + type: string + separator: + description: separator placed between concatenated source label values. When omitted, Prometheus will use its default value of ';'. + maxLength: 2048 + type: string + sourceLabels: + description: sourceLabels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the 'Replace', 'Keep', and 'Drop' actions. Not allowed for actions 'LabelKeep' and 'LabelDrop'. + items: + description: LabelName is a valid Prometheus label name which may only contain ASCII letters, numbers, and underscores. + maxLength: 2048 + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: targetLabel to which the resulting value is written in a 'Replace' action. 
It is required for 'Replace' and 'HashMod' actions and forbidden for actions 'LabelKeep' and 'LabelDrop'. Regex capture groups are available. + maxLength: 2048 + type: string + type: object + x-kubernetes-validations: + - message: relabel action hashmod requires non-zero modulus + rule: self.action != 'HashMod' || self.modulus != 0 + - message: targetLabel is required when action is Replace or HashMod + rule: (self.action != 'Replace' && self.action != 'HashMod') || has(self.targetLabel) + - message: LabelKeep and LabelDrop actions require only 'regex', and no other fields (found sourceLabels) + rule: (self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.sourceLabels) + - message: LabelKeep and LabelDrop actions require only 'regex', and no other fields (found targetLabel) + rule: (self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.targetLabel) + - message: LabelKeep and LabelDrop actions require only 'regex', and no other fields (found modulus) + rule: (self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.modulus) + - message: LabelKeep and LabelDrop actions require only 'regex', and no other fields (found separator) + rule: (self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.separator) + - message: LabelKeep and LabelDrop actions require only 'regex', and no other fields (found replacement) + rule: (self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.replacement) + - message: modulus requires sourceLabels to be present + rule: '!has(self.modulus) || (has(self.modulus) && size(self.sourceLabels) > 0)' + - message: sourceLabels is required for actions Replace, Keep, Drop, HashMod and LabelMap + rule: (self.action == 'LabelDrop' || self.action == 'LabelKeep') || has(self.sourceLabels) + - message: replacement is required for actions Replace and LabelMap + rule: (self.action != 'Replace' && self.action != 'LabelMap') || has(self.replacement) + minItems: 1 + type: array + 
required: + - configs + type: object + status: + description: status describes the current state of this AlertRelabelConfig object. + properties: + conditions: + description: conditions contains details on the state of the AlertRelabelConfig, may be empty. + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. 
Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] diff --git a/crd-catalog/openshift/api/network.openshift.io/v1/clusternetworks.yaml b/crd-catalog/openshift/api/network.openshift.io/v1/clusternetworks.yaml new file mode 100644 index 000000000..63f36c637 --- /dev/null +++ b/crd-catalog/openshift/api/network.openshift.io/v1/clusternetworks.yaml @@ -0,0 +1,102 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/527 + name: clusternetworks.network.openshift.io +spec: + group: network.openshift.io + names: + kind: ClusterNetwork + listKind: ClusterNetworkList + plural: clusternetworks + singular: clusternetwork + scope: Cluster + versions: + - 
additionalPrinterColumns: + - description: The primary cluster network CIDR + jsonPath: .network + name: Cluster Network + type: string + - description: The service network CIDR + jsonPath: .serviceNetwork + name: Service Network + type: string + - description: The OpenShift SDN network plug-in in use + jsonPath: .pluginName + name: Plugin Name + type: string + name: v1 + schema: + openAPIV3Schema: + description: "ClusterNetwork describes the cluster network. There is normally only one object of this type, named \"default\", which is created by the SDN network plugin based on the master configuration when the cluster is brought up for the first time. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + clusterNetworks: + description: ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. + items: + description: ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. + properties: + CIDR: + description: CIDR defines the total range of a cluster networks address space. 
+ pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + type: string + hostSubnetLength: + description: HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. + format: int32 + maximum: 30.0 + minimum: 2.0 + type: integer + required: + - CIDR + - hostSubnetLength + type: object + type: array + hostsubnetlength: + description: HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods + format: int32 + maximum: 30.0 + minimum: 2.0 + type: integer + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + mtu: + description: MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. 
+ format: int32 + maximum: 65536.0 + minimum: 576.0 + type: integer + network: + description: Network is a CIDR string specifying the global overlay network's L3 space + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + type: string + pluginName: + description: PluginName is the name of the network plugin being used + type: string + serviceNetwork: + description: ServiceNetwork is the CIDR range that Service IP addresses are allocated from + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + type: string + vxlanPort: + description: VXLANPort sets the VXLAN destination port used by the cluster. It is set by the master configuration file on startup and cannot be edited manually. Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port. 
+ format: int32 + maximum: 65535.0 + minimum: 1.0 + type: integer + required: + - clusterNetworks + - serviceNetwork + type: object + served: true + storage: true +status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] diff --git a/crd-catalog/openshift/api/network.openshift.io/v1/egressnetworkpolicies.yaml b/crd-catalog/openshift/api/network.openshift.io/v1/egressnetworkpolicies.yaml new file mode 100644 index 000000000..7cf59f2fb --- /dev/null +++ b/crd-catalog/openshift/api/network.openshift.io/v1/egressnetworkpolicies.yaml @@ -0,0 +1,71 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/527 + name: egressnetworkpolicies.network.openshift.io +spec: + group: network.openshift.io + names: + kind: EgressNetworkPolicy + listKind: EgressNetworkPolicyList + plural: egressnetworkpolicies + singular: egressnetworkpolicy + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "EgressNetworkPolicy describes the current egress network policy for a Namespace. When using the 'redhat/openshift-ovs-multitenant' network plugin, traffic from a pod to an IP address outside the cluster will be checked against each EgressNetworkPolicyRule in the pod's namespace's EgressNetworkPolicy, in order. If no rule matches (or no EgressNetworkPolicy is present) then the traffic will be allowed by default. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the current egress network policy + properties: + egress: + description: egress contains the list of egress policy rules + items: + description: EgressNetworkPolicyRule contains a single egress network policy rule + properties: + to: + description: to is the target that traffic is allowed/denied to + properties: + cidrSelector: + description: CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset Ideally we would have liked to use the cidr openapi format for this property. But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs We are therefore using a regex pattern to validate instead. + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + type: string + dnsName: + description: DNSName is the domain name to allow/deny traffic to. 
If this is set, cidrSelector must be unset + pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ + type: string + type: object + type: + description: type marks this as an "Allow" or "Deny" rule + pattern: ^Allow|Deny$ + type: string + required: + - to + - type + type: object + type: array + required: + - egress + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] diff --git a/crd-catalog/openshift/api/network.openshift.io/v1/hostsubnets.yaml b/crd-catalog/openshift/api/network.openshift.io/v1/hostsubnets.yaml new file mode 100644 index 000000000..bea63bb6e --- /dev/null +++ b/crd-catalog/openshift/api/network.openshift.io/v1/hostsubnets.yaml @@ -0,0 +1,88 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/527 + name: hostsubnets.network.openshift.io +spec: + group: network.openshift.io + names: + kind: HostSubnet + listKind: HostSubnetList + plural: hostsubnets + singular: hostsubnet + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The name of the node + jsonPath: .host + name: Host + type: string + - description: The IP address to be used as a VTEP by other nodes in the overlay network + jsonPath: .hostIP + name: Host IP + type: string + - description: The CIDR range of the overlay network assigned to the node for its pods + jsonPath: .subnet + name: Subnet + type: string + - description: The network egress CIDRs + jsonPath: .egressCIDRs + name: Egress CIDRs + type: string + - description: The network egress IP addresses + jsonPath: .egressIPs + name: Egress IPs + type: string + name: v1 + schema: + openAPIV3Schema: + description: "HostSubnet describes the container subnet network on a node. The HostSubnet object must have the same name as the Node object it corresponds to. 
\n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + egressCIDRs: + description: EgressCIDRs is the list of CIDR ranges available for automatically assigning egress IPs to this node from. If this field is set then EgressIPs should be treated as read-only. + items: + description: HostSubnetEgressCIDR represents one egress CIDR from which to assign IP addresses for this node represented by the HostSubnet + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + type: string + type: array + egressIPs: + description: EgressIPs is the list of automatic egress IP addresses currently hosted by this node. If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the master will overwrite the value here with its own allocation of egress IPs. + items: + description: HostSubnetEgressIP represents one egress IP address currently hosted on the node represented by HostSubnet + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$ + type: string + type: array + host: + description: Host is the name of the node. (This is the same as the object's name, but both fields must be set.) 
+ pattern: ^[a-z0-9.-]+$ + type: string + hostIP: + description: HostIP is the IP address to be used as a VTEP by other nodes in the overlay network + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$ + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + subnet: + description: Subnet is the CIDR range of the overlay network assigned to the node for its pods + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + type: string + required: + - host + - hostIP + - subnet + type: object + served: true + storage: true +status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] diff --git a/crd-catalog/openshift/api/network.openshift.io/v1/netnamespaces.yaml b/crd-catalog/openshift/api/network.openshift.io/v1/netnamespaces.yaml new file mode 100644 index 000000000..1d524145d --- /dev/null +++ b/crd-catalog/openshift/api/network.openshift.io/v1/netnamespaces.yaml @@ -0,0 +1,66 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/527 + name: netnamespaces.network.openshift.io +spec: + group: network.openshift.io + names: + kind: NetNamespace + listKind: NetNamespaceList + plural: netnamespaces + singular: netnamespace + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The network identifier of the network namespace + jsonPath: .netid + name: NetID + type: integer + - description: The network egress IP addresses + jsonPath: .egressIPs + 
name: Egress IPs + type: string + name: v1 + schema: + openAPIV3Schema: + description: "NetNamespace describes a single isolated network. When using the redhat/openshift-ovs-multitenant plugin, every Namespace will have a corresponding NetNamespace object with the same name. (When using redhat/openshift-ovs-subnet, NetNamespaces are not used.) \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + egressIPs: + description: EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. (If empty, external traffic will be masqueraded to Node IPs.) + items: + description: NetNamespaceEgressIP is a single egress IP out of a list of reserved IPs used as source of external traffic coming from pods in this namespace + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$ + type: string + type: array + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + netid: + description: NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. 
+ format: int32 + maximum: 16777215.0 + minimum: 0.0 + type: integer + netname: + description: NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) + pattern: ^[a-z0-9.-]+$ + type: string + required: + - netid + - netname + type: object + served: true + storage: true +status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] diff --git a/crd-catalog/openshift/api/network.operator.openshift.io/v1/egressrouters.args b/crd-catalog/openshift/api/network.operator.openshift.io/v1/egressrouters.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/network.operator.openshift.io/v1/egressrouters.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/network.operator.openshift.io/v1/egressrouters.yaml b/crd-catalog/openshift/api/network.operator.openshift.io/v1/egressrouters.yaml new file mode 100644 index 000000000..af086c701 --- /dev/null +++ b/crd-catalog/openshift/api/network.operator.openshift.io/v1/egressrouters.yaml @@ -0,0 +1,207 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/851 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + name: egressrouters.network.operator.openshift.io +spec: + group: network.operator.openshift.io + names: + kind: EgressRouter + listKind: EgressRouterList + plural: egressrouters + singular: egressrouter + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[*].type + name: Condition + type: string + - jsonPath: .status.conditions[*].status + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: "EgressRouter is a feature allowing the user to define an egress router that acts as a bridge between pods and external systems. 
The egress router runs a service that redirects egress traffic originating from a pod or a group of pods to a remote external system or multiple destinations as per configuration. \n It is consumed by the cluster-network-operator. More specifically, given an EgressRouter CR with , the CNO will create and manage: - A service called - An egress pod called - A NAD called \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). \n EgressRouter is a single egressrouter pod configuration object." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of the desired egress router. + oneOf: + - properties: + mode: + enum: + - Redirect + required: + - redirect + properties: + addresses: + description: List of IP addresses to configure on the pod's secondary interface. + items: + description: EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface + properties: + gateway: + anyOf: + - format: ipv4 + - format: ipv6 + description: IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6. + type: string + ip: + description: IP is the address to configure on the router's interface. Can be IPv4 or IPv6. 
+ type: string + required: + - ip + type: object + type: array + mode: + default: Redirect + description: Mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently. + enum: + - Redirect + type: string + networkInterface: + default: + macvlan: + mode: Bridge + description: Specification of interface to create/use. The default is macvlan. Currently only macvlan is supported. + oneOf: + - required: + - macvlan + properties: + macvlan: + default: + mode: Bridge + description: Arguments specific to the interfaceType macvlan + properties: + master: + description: Name of the master interface. Need not be specified if it can be inferred from the IP address. + type: string + mode: + default: Bridge + description: Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge". + enum: + - Bridge + - Private + - VEPA + - Passthru + type: string + required: + - mode + type: object + type: object + redirect: + description: Redirect represents the configuration parameters specific to redirect mode. + properties: + fallbackIP: + anyOf: + - format: ipv4 + - format: ipv6 + description: FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. If no redirect rules are specified, all traffic from the router are redirected to this IP. If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP. If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected. + type: string + redirectRules: + description: List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode. + items: + description: L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port. 
+ properties: + destinationIP: + anyOf: + - format: ipv4 + - format: ipv6 + description: IP specifies the remote destination's IP address. Can be IPv4 or IPv6. + type: string + port: + description: Port is the port number to which clients should send traffic to be redirected. + format: int32 + maximum: 65535.0 + minimum: 1.0 + type: integer + protocol: + description: Protocol can be TCP, SCTP or UDP. + enum: + - TCP + - UDP + - SCTP + type: string + targetPort: + description: TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. If unspecified, the value from "Port" is used. + format: int32 + maximum: 65535.0 + minimum: 1.0 + type: integer + required: + - destinationIP + - port + - protocol + type: object + type: array + type: object + required: + - addresses + - mode + - networkInterface + type: object + status: + description: Observed status of EgressRouter. + properties: + conditions: + description: Observed status of the egress router + items: + description: EgressRouterStatusCondition represents the state of the egress router's managed and monitored components. + properties: + lastTransitionTime: + description: LastTransitionTime is the time of the last update to the current status property. + format: date-time + nullable: true + type: string + message: + description: Message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + type: string + reason: + description: Reason is the CamelCase reason for the condition's current status. + type: string + status: + description: Status of the condition, one of True, False, Unknown. 
+ enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded + enum: + - Available + - Progressing + - Degraded + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + required: + - conditions + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: '' + plural: '' + conditions: [] + storedVersions: [] diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/authentications.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/authentications.yaml new file mode 100644 index 000000000..1d1f7d3ed --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/authentications.yaml @@ -0,0 +1,140 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: authentications.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Authentication + plural: authentications + singular: authentication + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Authentication provides information to configure an operator to manage authentication. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. 
Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + oauthAPIServer: + description: OAuthAPIServer holds status specific only to oauth-apiserver + properties: + latestAvailableRevision: + description: LatestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods. 
+ format: int32 + minimum: 0.0 + type: integer + type: object + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/cloudcredentials.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/cloudcredentials.yaml new file mode 100644 index 000000000..8ecd3cf8b --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/cloudcredentials.yaml @@ -0,0 +1,143 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/692 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: cloudcredentials.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: CloudCredential + listKind: CloudCredentialList + plural: cloudcredentials + singular: cloudcredential + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "CloudCredential provides a means to configure an operator to manage CredentialsRequests. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator. + properties: + credentialsMode: + description: 'CredentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into "manual" mode if desired. Leaving the field in default mode runs CCO so that the cluster''s cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). Supported modes: AWS/Azure/GCP: "" (Default), "Mint", "Passthrough", "Manual" Others: Do not set value as other platforms only support running in "Passthrough"' + enum: + - '' + - Manual + - Mint + - Passthrough + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: CloudCredentialStatus defines the observed status of the cloud-credential-operator. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/clustercsidrivers.args b/crd-catalog/openshift/api/operator.openshift.io/v1/clustercsidrivers.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/clustercsidrivers.args @@ -0,0 +1 @@ 
+--derive=PartialEq diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/clustercsidrivers.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/clustercsidrivers.yaml new file mode 100644 index 000000000..44c98b4c3 --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/clustercsidrivers.yaml @@ -0,0 +1,271 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/701 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: clustercsidrivers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: ClusterCSIDriver + plural: clustercsidrivers + singular: clustercsidriver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ClusterCSIDriver object allows management and configuration of a CSI driver operator installed by default in OpenShift. Name of the object must be name of the CSI driver it operates. See CSIDriverName type for list of allowed values. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + properties: + name: + enum: + - ebs.csi.aws.com + - efs.csi.aws.com + - disk.csi.azure.com + - file.csi.azure.com + - filestore.csi.storage.gke.io + - pd.csi.storage.gke.io + - cinder.csi.openstack.org + - csi.vsphere.vmware.com + - manila.csi.openstack.org + - csi.ovirt.org + - csi.kubevirt.io + - csi.sharedresource.openshift.io + - diskplugin.csi.alibabacloud.com + - vpc.block.csi.ibm.io + - powervs.csi.ibm.com + - secrets-store.csi.k8s.io + type: string + type: object + spec: + description: spec holds user settable values for configuration + properties: + driverConfig: + description: driverConfig can be used to specify platform specific driver configuration. When omitted, this means no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. + properties: + aws: + description: aws is used to configure the AWS CSI driver. + properties: + kmsKeyARN: + description: kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, rather than the default KMS key used by AWS. The value may be either the ARN or Alias ARN of a KMS key. + pattern: ^arn:(aws|aws-cn|aws-us-gov):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\/.*$ + type: string + type: object + azure: + description: azure is used to configure the Azure CSI driver. + properties: + diskEncryptionSet: + description: diskEncryptionSet sets the cluster default storage class to encrypt volumes with a customer-managed encryption set, rather than the default platform-managed keys. + properties: + name: + description: name is the name of the disk encryption set that will be set on the default storage class. The value should consist of only alphanumeric characters, underscores (_), hyphens, and be at most 80 characters in length. 
+ maxLength: 80 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + resourceGroup: + description: resourceGroup defines the Azure resource group that contains the disk encryption set. The value should consist of only alphanumeric characters, underscores (_), parentheses, hyphens and periods. The value should not end in a period and be at most 90 characters in length. + maxLength: 90 + pattern: ^[\w\.\-\(\)]*[\w\-\(\)]$ + type: string + subscriptionID: + description: 'subscriptionID defines the Azure subscription that contains the disk encryption set. The value should meet the following conditions: 1. It should be a 128-bit number. 2. It should be 36 characters (32 hexadecimal characters and 4 hyphens) long. 3. It should be displayed in five groups separated by hyphens (-). 4. The first group should be 8 characters long. 5. The second, third, and fourth groups should be 4 characters long. 6. The fifth group should be 12 characters long. An Example SubscriptionID: f2007bbf-f802-4a47-9336-cf7c6b89b378' + maxLength: 36 + pattern: ^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$ + type: string + required: + - name + - resourceGroup + - subscriptionID + type: object + type: object + driverType: + description: 'driverType indicates type of CSI driver for which the driverConfig is being applied to. Valid values are: AWS, Azure, GCP, IBMCloud, vSphere and omitted. Consumers should treat unknown values as a NO-OP.' + enum: + - '' + - AWS + - Azure + - GCP + - IBMCloud + - vSphere + type: string + gcp: + description: gcp is used to configure the GCP CSI driver. + properties: + kmsKey: + description: kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied encryption keys, rather than the default keys managed by GCP. + properties: + keyRing: + description: keyRing is the name of the KMS Key Ring which the KMS Key belongs to. 
The value should correspond to an existing KMS key ring and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + location: + description: location is the GCP location in which the Key Ring exists. The value must match an existing GCP location, or "global". Defaults to global, if not set. + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + name: + description: name is the name of the customer-managed encryption key to be used for disk encryption. The value should correspond to an existing KMS key and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + projectID: + description: projectID is the ID of the Project in which the KMS Key Ring exists. It must be 6 to 30 lowercase letters, digits, or hyphens. It must start with a letter. Trailing hyphens are prohibited. + maxLength: 30 + minLength: 6 + pattern: ^[a-z][a-z0-9-]+[a-z0-9]$ + type: string + required: + - keyRing + - name + - projectID + type: object + type: object + ibmcloud: + description: ibmcloud is used to configure the IBM Cloud CSI driver. + properties: + encryptionKeyCRN: + description: encryptionKeyCRN is the IBM Cloud CRN of the customer-managed root key to use for disk encryption of volumes for the default storage classes. + maxLength: 154 + minLength: 144 + pattern: ^crn:v[0-9]+:bluemix:(public|private):(kms|hs-crypto):[a-z-]+:a/[0-9a-f]+:[0-9a-f-]{36}:key:[0-9a-f-]{36}$ + type: string + required: + - encryptionKeyCRN + type: object + vSphere: + description: vsphere is used to configure the vsphere CSI driver. + properties: + topologyCategories: + description: topologyCategories indicates tag categories with which vcenter resources such as hostcluster or datacenter were tagged with. 
If cluster Infrastructure object has a topology, values specified in Infrastructure object will be used and modifications to topologyCategories will be rejected. + items: + type: string + type: array + type: object + required: + - driverType + type: object + x-kubernetes-validations: + - message: ibmcloud must be set if driverType is 'IBMCloud', but remain unset otherwise + rule: 'has(self.driverType) && self.driverType == ''IBMCloud'' ? has(self.ibmcloud) : !has(self.ibmcloud)' + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + storageClassState: + description: StorageClassState determines if CSI operator should create and manage storage classes. 
If this field value is empty or Managed - CSI operator will continuously reconcile storage class and create if necessary. If this field value is Unmanaged - CSI operator will not reconcile any previously created storage class. If this field value is Removed - CSI operator will delete the storage class it created previously. When omitted, this means the user has no opinion and the platform chooses a reasonable default, which is subject to change over time. The current default behaviour is Managed. + enum: + - '' + - Managed + - Unmanaged + - Removed + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/configs.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/configs.yaml new file mode 100644 index 000000000..26acd5079 --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/configs.yaml @@ -0,0 +1,136 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/612 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: configs.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: Config + plural: configs + singular: config + 
scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Config specifies the behavior of the config operator which is responsible for creating the initial configuration of other components on the cluster. The operator also handles installation, migration or synchronization of cloud configurations for AWS and Azure cloud based clusters \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the Config Operator. + properties: + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status defines the observed status of the Config Operator. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/consoles.args b/crd-catalog/openshift/api/operator.openshift.io/v1/consoles.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/consoles.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git 
a/crd-catalog/openshift/api/operator.openshift.io/v1/consoles.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/consoles.yaml new file mode 100644 index 000000000..59bac79fb --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/consoles.yaml @@ -0,0 +1,433 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/486 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: consoles.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Console + listKind: ConsoleList + plural: consoles + singular: console + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Console provides a means to configure an operator to manage the console. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleSpec is the specification of the desired behavior of the Console. 
+ properties: + customization: + description: customization is used to optionally provide a small set of customization options to the web console. + properties: + addPage: + description: addPage allows customizing actions on the Add page in developer perspective. + properties: + disabledActions: + description: disabledActions is a list of actions that are not shown to users. Each action in the list is represented by its ID. + items: + type: string + minItems: 1 + type: array + type: object + brand: + description: brand is the default branding of the web console which can be overridden by providing the brand field. There is a limited set of specific brand options. This field controls elements of the console such as the logo. Invalid value will prevent a console rollout. + enum: + - openshift + - OpenShift + - OKD + - Online + - OCP + - Dedicated + - Azure + - ROSA + type: string + customLogoFile: + description: 'customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a ConfigMap in the openshift-config namespace. This can be created with a command like ''oc create configmap custom-logo --from-file=/path/to/file -n openshift-config''. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. Recommended logo specifications: Dimensions: Max height of 68px and max width of 200px SVG format preferred' + properties: + key: + description: Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. + type: string + name: + type: string + type: object + customProductName: + description: customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog instead of the normal OpenShift product name. 
+ type: string + developerCatalog: + description: developerCatalog allows to configure the shown developer catalog categories (filters) and types (sub-catalogs). + properties: + categories: + description: categories which are shown in the developer catalog. + items: + description: DeveloperConsoleCatalogCategory for the developer console catalog. + properties: + id: + description: ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. + maxLength: 32 + minLength: 1 + pattern: ^[A-Za-z0-9-_]+$ + type: string + label: + description: label defines a category display label. It is required and must have 1-64 characters. + maxLength: 64 + minLength: 1 + type: string + subcategories: + description: subcategories defines a list of child categories. + items: + description: DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category. + properties: + id: + description: ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. + maxLength: 32 + minLength: 1 + pattern: ^[A-Za-z0-9-_]+$ + type: string + label: + description: label defines a category display label. It is required and must have 1-64 characters. + maxLength: 64 + minLength: 1 + type: string + tags: + description: tags is a list of strings that will match the category. A selected category show all items which has at least one overlapping tag between category and item. + items: + type: string + type: array + required: + - id + - label + type: object + type: array + tags: + description: tags is a list of strings that will match the category. A selected category show all items which has at least one overlapping tag between category and item. 
+ items: + type: string + type: array + required: + - id + - label + type: object + type: array + types: + description: types allows enabling or disabling of sub-catalog types that user can see in the Developer catalog. When omitted, all the sub-catalog types will be shown. + properties: + disabled: + description: 'disabled is a list of developer catalog types (sub-catalogs IDs) that are not shown to users. Types (sub-catalogs) are added via console plugins, the available types (sub-catalog IDs) are available in the console on the cluster configuration page, or when editing the YAML in the console. Example: "Devfile", "HelmChart", "BuilderImage" If the list is empty or all the available sub-catalog types are added, then the complete developer catalog should be hidden.' + items: + type: string + type: array + x-kubernetes-list-type: set + enabled: + description: 'enabled is a list of developer catalog types (sub-catalogs IDs) that will be shown to users. Types (sub-catalogs) are added via console plugins, the available types (sub-catalog IDs) are available in the console on the cluster configuration page, or when editing the YAML in the console. Example: "Devfile", "HelmChart", "BuilderImage" If the list is non-empty, a new type will not be shown to the user until it is added to list. If the list is empty the complete developer catalog will be shown.' + items: + type: string + type: array + x-kubernetes-list-type: set + state: + default: Enabled + description: state defines if a list of catalog types should be enabled or disabled. + enum: + - Enabled + - Disabled + type: string + required: + - state + type: object + x-kubernetes-validations: + - message: enabled is forbidden when state is not Enabled + rule: 'self.state == ''Enabled'' ? true : !has(self.enabled)' + - message: disabled is forbidden when state is not Disabled + rule: 'self.state == ''Disabled'' ? 
true : !has(self.disabled)' + type: object + documentationBaseURL: + description: documentationBaseURL links to external documentation are shown in various sections of the web console. Providing documentationBaseURL will override the default documentation URL. Invalid value will prevent a console rollout. + pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))\/$ + type: string + perspectives: + description: perspectives allows enabling/disabling of perspective(s) that user can see in the Perspective switcher dropdown. + items: + description: Perspective defines a perspective that cluster admins want to show/hide in the perspective switcher dropdown + properties: + id: + description: 'id defines the id of the perspective. Example: "dev", "admin". The available perspective ids can be found in the code snippet section next to the yaml editor. Incorrect or unknown ids will be ignored.' + type: string + pinnedResources: + description: pinnedResources defines the list of default pinned resources that users will see on the perspective navigation if they have not customized these pinned resources themselves. The list of available Kubernetes resources could be read via `kubectl api-resources`. The console will also provide a configuration UI and a YAML snippet that will list the available resources that can be pinned to the navigation. Incorrect or unknown resources will be ignored. + items: + description: PinnedResourceReference includes the group, version and type of resource + properties: + group: + description: 'group is the API Group of the Resource. Enter empty string for the core group. This value should consist of only lowercase alphanumeric characters, hyphens and periods. Example: "", "apps", "build.openshift.io", etc.' + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + resource: + description: 'resource is the type that is being referenced. 
It is normally the plural form of the resource kind in lowercase. This value should consist of only lowercase alphanumeric characters and hyphens. Example: "deployments", "deploymentconfigs", "pods", etc.' + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + version: + description: 'version is the API Version of the Resource. This value should consist of only lowercase alphanumeric characters. Example: "v1", "v1beta1", etc.' + pattern: ^[a-z0-9]+$ + type: string + required: + - group + - resource + - version + type: object + maxItems: 100 + type: array + visibility: + description: visibility defines the state of perspective along with access review checks if needed for that perspective. + properties: + accessReview: + description: accessReview defines required and missing access review checks. + minProperties: 1 + properties: + missing: + description: missing defines a list of permission checks. The perspective will only be shown when at least one check fails. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the required access review list. + items: + description: ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface + properties: + group: + description: Group is the API Group of the Resource. "*" means all. + type: string + name: + description: Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + type: string + namespace: + description: Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces "" (empty) is defaulted for LocalSubjectAccessReviews "" (empty) is empty for cluster-scoped resources "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + type: string + resource: + description: Resource is one of the existing resource types. "*" means all. + type: string + subresource: + description: Subresource is one of the existing resource types. "" means none. + type: string + verb: + description: 'Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.' + type: string + version: + description: Version is the API Version of the Resource. "*" means all. + type: string + type: object + type: array + required: + description: required defines a list of permission checks. The perspective will only be shown when all checks are successful. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the missing access review list. + items: + description: ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface + properties: + group: + description: Group is the API Group of the Resource. "*" means all. + type: string + name: + description: Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + type: string + namespace: + description: Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces "" (empty) is defaulted for LocalSubjectAccessReviews "" (empty) is empty for cluster-scoped resources "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + type: string + resource: + description: Resource is one of the existing resource types. "*" means all. 
+ type: string + subresource: + description: Subresource is one of the existing resource types. "" means none. + type: string + verb: + description: 'Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.' + type: string + version: + description: Version is the API Version of the Resource. "*" means all. + type: string + type: object + type: array + type: object + state: + description: state defines the perspective is enabled or disabled or access review check is required. + enum: + - Enabled + - Disabled + - AccessReview + type: string + required: + - state + type: object + x-kubernetes-validations: + - message: accessReview configuration is required when state is AccessReview, and forbidden otherwise + rule: 'self.state == ''AccessReview'' ? has(self.accessReview) : !has(self.accessReview)' + required: + - id + - visibility + type: object + x-kubernetes-validations: + - message: pinnedResources is allowed only for dev and forbidden for other perspectives + rule: 'has(self.id) && self.id != ''dev''? !has(self.pinnedResources) : true' + type: array + x-kubernetes-list-map-keys: + - id + x-kubernetes-list-type: map + projectAccess: + description: projectAccess allows customizing the available list of ClusterRoles in the Developer perspective Project access page which can be used by a project admin to specify roles to other users and restrict access within the project. If set, the list will replace the default ClusterRole options. + properties: + availableClusterRoles: + description: availableClusterRoles is the list of ClusterRole names that are assignable to users through the project access tab. + items: + type: string + type: array + type: object + quickStarts: + description: quickStarts allows customization of available ConsoleQuickStart resources in console. + properties: + disabled: + description: disabled is a list of ConsoleQuickStart resource names that are not shown to users. 
+ items: + type: string + type: array + type: object + type: object + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + plugins: + description: plugins defines a list of enabled console plugin names. + items: + type: string + type: array + providers: + description: providers contains configuration for using specific service providers. + properties: + statuspage: + description: statuspage contains ID for statuspage.io page that provides status info about. + properties: + pageID: + description: pageID is the unique ID assigned by Statuspage for your page. This must be a public page. 
+ type: string + type: object + type: object + route: + description: route contains hostname and secret reference that contains the serving certificate. If a custom route is specified, a new route will be created with the provided hostname, under which console will be available. In case of custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. In case of custom hostname points to an arbitrary domain, manual DNS configurations steps are necessary. The default console route will be maintained to reserve the default hostname for console if the custom route is removed. If not specified, default route will be used. DEPRECATED + properties: + hostname: + description: hostname is the desired custom domain under which console will be available. + type: string + secret: + description: 'secret points to secret in the openshift-config namespace that contains custom certificate and key and needs to be created manually by the cluster admin. Referenced Secret is required to contain following key value pairs: - "tls.crt" - to specifies custom certificate - "tls.key" - to specifies private key of the custom certificate If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.' + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + type: object + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. 
+ nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: ConsoleStatus defines the observed status of the Console. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + 
subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/csisnapshotcontrollers.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/csisnapshotcontrollers.yaml new file mode 100644 index 000000000..4ea6e663b --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/csisnapshotcontrollers.yaml @@ -0,0 +1,134 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/562 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: csisnapshotcontrollers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: CSISnapshotController + plural: csisnapshotcontrollers + singular: csisnapshotcontroller + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. 
Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability 
applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/dnses.args b/crd-catalog/openshift/api/operator.openshift.io/v1/dnses.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/dnses.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/dnses.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/dnses.yaml new file mode 100644 index 000000000..f388bf86e --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/dnses.yaml @@ -0,0 +1,303 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: dnses.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "DNS manages the CoreDNS component to provide a name resolution service for pods and services in the cluster. \n This supports the DNS-based service discovery specification: https://github.com/kubernetes/dns/blob/master/docs/specification.md \n More details: https://kubernetes.io/docs/tasks/administer-cluster/coredns \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the DNS. + properties: + cache: + description: 'cache describes the caching configuration that applies to all server blocks listed in the Corefile. This field allows a cluster admin to optionally configure: * positiveTTL which is a duration for which positive responses should be cached. * negativeTTL which is a duration for which negative responses should be cached. If this is not configured, OpenShift will configure positive and negative caching with a default value that is subject to change. At the time of writing, the default positiveTTL is 900 seconds and the default negativeTTL is 30 seconds or as noted in the respective Corefile for your version of OpenShift.' + properties: + negativeTTL: + description: "negativeTTL is optional and specifies the amount of time that a negative response should be cached. \n If configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"100s\", \"1m30s\", \"12h30m10s\". Values that are fractions of a second are rounded down to the nearest second. If the configured value is less than 1s, the default value will be used. If not configured, the value will be 0s and OpenShift will use a default value of 30 seconds unless noted otherwise in the respective Corefile for your version of OpenShift. 
The default value of 30 seconds is subject to change." + pattern: ^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ + type: string + positiveTTL: + description: "positiveTTL is optional and specifies the amount of time that a positive response should be cached. \n If configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"100s\", \"1m30s\", \"12h30m10s\". Values that are fractions of a second are rounded down to the nearest second. If the configured value is less than 1s, the default value will be used. If not configured, the value will be 0s and OpenShift will use a default value of 900 seconds unless noted otherwise in the respective Corefile for your version of OpenShift. The default value of 900 seconds is subject to change." + pattern: ^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ + type: string + type: object + logLevel: + default: Normal + description: 'logLevel describes the desired logging verbosity for CoreDNS. Any one of the following values may be specified: * Normal logs errors from upstream resolvers. * Debug logs errors, NXDOMAIN responses, and NODATA responses. * Trace logs errors and all responses. Setting logLevel: Trace will produce extremely verbose logs. Valid values are: "Normal", "Debug", "Trace". Defaults to "Normal".' + enum: + - Normal + - Debug + - Trace + type: string + managementState: + description: managementState indicates whether the DNS operator should manage cluster DNS + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + nodePlacement: + description: "nodePlacement provides explicit control over the scheduling of DNS pods. \n Generally, it is useful to run a DNS pod on every node so that DNS queries are always handled by a local DNS pod instead of going over the network to a DNS pod on another node. 
However, security policies may require restricting the placement of DNS pods to specific nodes. For example, if a security policy prohibits pods on arbitrary nodes from communicating with the API, a node selector can be specified to restrict DNS pods to nodes that are permitted to communicate with the API. Conversely, if running DNS pods on nodes with a particular taint is desired, a toleration can be specified for that taint. \n If unset, defaults are used. See nodePlacement for more details." + properties: + nodeSelector: + additionalProperties: + type: string + description: "nodeSelector is the node selector applied to DNS pods. \n If empty, the default is used, which is currently the following: \n kubernetes.io/os: linux \n This default is subject to change. \n If set, the specified selector is used and replaces the default." + type: object + tolerations: + description: "tolerations is a list of tolerations applied to DNS pods. \n If empty, the DNS operator sets a toleration for the \"node-role.kubernetes.io/master\" taint. This default is subject to change. Specifying tolerations without including a toleration for the \"node-role.kubernetes.io/master\" taint may be risky as it could lead to an outage if all worker nodes become unavailable. \n Note that the daemon controller adds some tolerations as well. See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/" + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. 
+ type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + operatorLogLevel: + default: Normal + description: 'operatorLogLevel controls the logging level of the DNS Operator. Valid values are: "Normal", "Debug", "Trace". Defaults to "Normal". setting operatorLogLevel: Trace will produce extremely verbose logs.' + enum: + - Normal + - Debug + - Trace + type: string + servers: + description: "servers is a list of DNS resolvers that provide name query delegation for one or more subdomains outside the scope of the cluster domain. If servers consists of more than one Server, longest suffix match will be used to determine the Server. \n For example, if there are two Servers, one for \"foo.com\" and another for \"a.foo.com\", and the name query is for \"www.a.foo.com\", it will be routed to the Server with Zone \"a.foo.com\". \n If this field is nil, no servers are created." + items: + description: Server defines the schema for a server that runs per instance of CoreDNS. + properties: + forwardPlugin: + description: forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers. 
+ properties: + policy: + default: Random + description: "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified: \n * \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query. \n The default value is \"Random\"" + enum: + - Random + - RoundRobin + - Sequential + type: string + protocolStrategy: + description: protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are "TCP" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. "TCP" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. "TCP" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS. + enum: + - TCP + - '' + type: string + transportConfig: + description: "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver. \n The default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver." + properties: + tls: + description: tls contains the additional configuration options to use when Transport is set to "TLS". 
+ properties: + caBundle: + description: "caBundle references a ConfigMap that must contain either a single CA Certificate or a CA Bundle. This allows cluster administrators to provide their own CA or CA bundle for validating the certificate of upstream resolvers. \n 1. The configmap must contain a `ca-bundle.crt` key. 2. The value must be a PEM encoded CA certificate or CA bundle. 3. The administrator must create this configmap in the openshift-config namespace. 4. The upstream server certificate must contain a Subject Alternative Name (SAN) that matches ServerName." + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + serverName: + description: serverName is the upstream server to connect to when forwarding DNS queries. This is required when Transport is set to "TLS". ServerName will be validated against the DNS naming conventions in RFC 1123 and should match the TLS certificate installed in the upstream resolver(s). + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - serverName + type: object + transport: + description: "transport allows cluster administrators to opt-in to using a DNS-over-TLS connection between cluster DNS and an upstream resolver(s). Configuring TLS as the transport at this level without configuring a CABundle will result in the system certificates being used to verify the serving certificate of the upstream resolver(s). \n Possible values: \"\" (empty) - This means no explicit choice has been made and the platform chooses the default which is subject to change over time. The current default is \"Cleartext\". \"Cleartext\" - Cluster admin specified cleartext option. 
This results in the same functionality as an empty value but may be useful when a cluster admin wants to be more explicit about the transport, or wants to switch from \"TLS\" to \"Cleartext\" explicitly. \"TLS\" - This indicates that DNS queries should be sent over a TLS connection. If Transport is set to TLS, you MUST also set ServerName. If a port is not included with the upstream IP, port 853 will be tried by default per RFC 7858 section 3.1; https://datatracker.ietf.org/doc/html/rfc7858#section-3.1." + enum: + - TLS + - Cleartext + - '' + type: string + type: object + upstreams: + description: "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53. \n A maximum of 15 upstreams is allowed per ForwardPlugin." + items: + type: string + maxItems: 15 + type: array + type: object + name: + description: name is required and specifies a unique name for the server. Name must comply with the Service Name Syntax of rfc6335. + type: string + zones: + description: zones is required and specifies the subdomains that Server is authoritative for. Zones must conform to the rfc1123 definition of a subdomain. Specifying the cluster domain (i.e., "cluster.local") is invalid. 
+ items: + type: string + type: array + type: object + type: array + upstreamResolvers: + default: {} + description: "upstreamResolvers defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers for the case of the default (\".\") server \n If this field is not specified, the upstream used will default to /etc/resolv.conf, with policy \"sequential\"" + properties: + policy: + default: Sequential + description: "Policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified: \n * \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query. \n The default value is \"Sequential\"" + enum: + - Random + - RoundRobin + - Sequential + type: string + protocolStrategy: + description: protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are "TCP" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. "TCP" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. "TCP" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS. 
+ enum: + - TCP + - '' + type: string + transportConfig: + description: "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver. \n The default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver." + properties: + tls: + description: tls contains the additional configuration options to use when Transport is set to "TLS". + properties: + caBundle: + description: "caBundle references a ConfigMap that must contain either a single CA Certificate or a CA Bundle. This allows cluster administrators to provide their own CA or CA bundle for validating the certificate of upstream resolvers. \n 1. The configmap must contain a `ca-bundle.crt` key. 2. The value must be a PEM encoded CA certificate or CA bundle. 3. The administrator must create this configmap in the openshift-config namespace. 4. The upstream server certificate must contain a Subject Alternative Name (SAN) that matches ServerName." + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + serverName: + description: serverName is the upstream server to connect to when forwarding DNS queries. This is required when Transport is set to "TLS". ServerName will be validated against the DNS naming conventions in RFC 1123 and should match the TLS certificate installed in the upstream resolver(s). + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - serverName + type: object + transport: + description: "transport allows cluster administrators to opt-in to using a DNS-over-TLS connection between cluster DNS and an upstream resolver(s). 
Configuring TLS as the transport at this level without configuring a CABundle will result in the system certificates being used to verify the serving certificate of the upstream resolver(s). \n Possible values: \"\" (empty) - This means no explicit choice has been made and the platform chooses the default which is subject to change over time. The current default is \"Cleartext\". \"Cleartext\" - Cluster admin specified cleartext option. This results in the same functionality as an empty value but may be useful when a cluster admin wants to be more explicit about the transport, or wants to switch from \"TLS\" to \"Cleartext\" explicitly. \"TLS\" - This indicates that DNS queries should be sent over a TLS connection. If Transport is set to TLS, you MUST also set ServerName. If a port is not included with the upstream IP, port 853 will be tried by default per RFC 7858 section 3.1; https://datatracker.ietf.org/doc/html/rfc7858#section-3.1." + enum: + - TLS + - Cleartext + - '' + type: string + type: object + upstreams: + default: + - type: SystemResolvConf + description: "Upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. \n A maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default" + items: + anyOf: + - not: + required: + - address + - port + properties: + type: + enum: + - '' + - SystemResolvConf + - properties: + type: + enum: + - Network + required: + - address + description: "Upstream can either be of type SystemResolvConf, or of type Network. \n * For an Upstream of type SystemResolvConf, no further fields are necessary: The upstream will be configured to use /etc/resolv.conf. 
* For an Upstream of type Network, a NetworkResolver field needs to be defined with an IP address or IP:port if the upstream listens on a port other than 53." + properties: + address: + anyOf: + - format: ipv4 + - format: ipv6 + description: Address must be defined when Type is set to Network. It will be ignored otherwise. It must be a valid ipv4 or ipv6 address. + type: string + port: + default: 53 + description: Port may be defined when Type is set to Network. It will be ignored otherwise. Port must be between 1 and 65535 + format: int32 + maximum: 65535.0 + minimum: 1.0 + type: integer + type: + description: "Type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. Type accepts 2 possible values: SystemResolvConf or Network. \n * When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined: /etc/resolv.conf will be used * When Network is used, the Upstream structure must contain at least an Address" + enum: + - SystemResolvConf + - Network + - '' + type: string + required: + - type + type: object + maxItems: 15 + type: array + type: object + type: object + status: + description: status is the most recently observed status of the DNS. + properties: + clusterDomain: + description: "clusterDomain is the local cluster DNS domain suffix for DNS services. This will be a subdomain as defined in RFC 1034, section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 Example: \"cluster.local\" \n More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service" + type: string + clusterIP: + description: "clusterIP is the service IP through which this DNS is made available. \n In the case of the default DNS, this will be a well known IP that is used as the default nameserver for pods that are using the default ClusterFirst DNS policy. 
\n In general, this IP can be specified in a pod's spec.dnsConfig.nameservers list or used explicitly when performing name resolution from within the cluster. Example: dig foo.com @ \n More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies" + type: string + conditions: + description: "conditions provide information about the state of the DNS on the cluster. \n These are the supported DNS conditions: \n * Available - True if the following conditions are met: * DNS controller daemonset is available. - False if any of those conditions are unsatisfied." + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + required: + - clusterDomain + - clusterIP + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/etcds.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/etcds.yaml new file mode 100644 index 000000000..d6fa242fc --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/etcds.yaml @@ -0,0 +1,200 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/752 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/feature-set: Default + name: etcds.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: Etcd + plural: etcds + singular: etcd + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Etcd provides information to configure an 
operator to manage etcd. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config. + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + controlPlaneHardwareSpeed: + description: ControlPlaneHardwareSpeed declares valid hardware speed tolerance levels + enum: + - '' + - Standard + - Slower + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across individual nodes + items: + description: NodeStatus provides information about the current state of a particular node managed by this operator. 
+ properties: + currentRevision: + description: currentRevision is the generation of the most recently successful deployment + format: int32 + type: integer + lastFailedCount: + description: lastFailedCount is how often the installer pod of the last failed revision failed. + type: integer + lastFailedReason: + description: lastFailedReason is a machine readable failure reason string. + type: string + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision. + items: + type: string + type: array + lastFailedTime: + description: lastFailedTime is the time the last failed revision failed the last time. + format: date-time + type: string + lastFallbackCount: + description: lastFallbackCount is how often a fallback to a previous revision happened. 
+ type: integer + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment we're trying to apply + format: int32 + type: integer + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/fixup.sh b/crd-catalog/openshift/api/operator.openshift.io/v1/fixup.sh new file mode 100755 index 000000000..c8350c480 --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/fixup.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# SPDX-FileCopyrightText: The kube-custom-resources-rs Authors +# SPDX-License-Identifier: 0BSD + +project=$(dirname "${0}") +echo "fixing ${project}" + +brand='.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.customization.properties.brand' + +yq --inplace "del(${brand}.enum[] | select(. == \"online\"))" \ + "${project}/consoles.yaml" +yq --inplace "del(${brand}.enum[] | select(. == \"okd\"))" \ + "${project}/consoles.yaml" +yq --inplace "del(${brand}.enum[] | select(. == \"ocp\"))" \ + "${project}/consoles.yaml" +yq --inplace "del(${brand}.enum[] | select(. == \"dedicated\"))" \ + "${project}/consoles.yaml" +yq --inplace "del(${brand}.enum[] | select(. 
== \"azure\"))" \ + "${project}/consoles.yaml" diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/ingresscontrollers.args b/crd-catalog/openshift/api/operator.openshift.io/v1/ingresscontrollers.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/ingresscontrollers.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/ingresscontrollers.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/ingresscontrollers.yaml new file mode 100644 index 000000000..c0370d9b6 --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/ingresscontrollers.yaml @@ -0,0 +1,1137 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/616 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: ingresscontrollers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: IngressController + listKind: IngressControllerList + plural: ingresscontrollers + singular: ingresscontroller + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "IngressController describes a managed ingress controller for the cluster. The controller can service OpenShift Route and Kubernetes Ingress resources. \n When an IngressController is created, a new ingress controller deployment is created to allow external traffic to reach the services that expose Ingress or Route resources. Updating this resource may lead to disruption for public facing network connections as a new ingress controller revision may be rolled out. \n https://kubernetes.io/docs/concepts/services-networking/ingress-controllers \n Whenever possible, sensible defaults for the platform are used. 
See each field for more details. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the IngressController. + properties: + clientTLS: + description: clientTLS specifies settings for requesting and verifying client certificates, which can be used to enable mutual TLS for edge-terminated and reencrypt routes. + properties: + allowedSubjectPatterns: + description: allowedSubjectPatterns specifies a list of regular expressions that should be matched against the distinguished name on a valid client certificate to filter requests. The regular expressions must use PCRE syntax. If this list is empty, no filtering is performed. If the list is nonempty, then at least one pattern must match a client certificate's distinguished name or else the ingress controller rejects the certificate and denies the connection. + items: + type: string + type: array + x-kubernetes-list-type: atomic + clientCA: + description: clientCA specifies a configmap containing the PEM-encoded CA certificate bundle that should be used to verify a client's certificate. The administrator must create this configmap in the openshift-config namespace. 
+ properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + clientCertificatePolicy: + description: "clientCertificatePolicy specifies whether the ingress controller requires clients to provide certificates. This field accepts the values \"Required\" or \"Optional\". \n Note that the ingress controller only checks client certificates for edge-terminated and reencrypt TLS routes; it cannot check certificates for cleartext HTTP or passthrough TLS routes." + enum: + - '' + - Required + - Optional + type: string + required: + - clientCA + - clientCertificatePolicy + type: object + defaultCertificate: + description: "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used. \n The secret must contain the following keys and data: \n tls.crt: certificate file contents tls.key: key file contents \n If unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store. \n If a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing. \n The in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server." + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + x-kubernetes-map-type: atomic + domain: + description: "domain is a DNS name serviced by the ingress controller and is used to configure multiple features: \n * For the LoadBalancerService endpoint publishing strategy, domain is used to configure DNS records. See endpointPublishingStrategy. \n * When using a generated default certificate, the certificate will be valid for domain and its subdomains. See defaultCertificate. \n * The value is published to individual Route statuses so that end-users know where to target external DNS records. \n domain must be unique among all IngressControllers, and cannot be updated. \n If empty, defaults to ingress.config.openshift.io/cluster .spec.domain." + type: string + endpointPublishingStrategy: + description: "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc. \n If unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform: \n AWS: LoadBalancerService (with External scope) Azure: LoadBalancerService (with External scope) GCP: LoadBalancerService (with External scope) IBMCloud: LoadBalancerService (with External scope) AlibabaCloud: LoadBalancerService (with External scope) Libvirt: HostNetwork \n Any other platform types (including None) default to HostNetwork. \n endpointPublishingStrategy cannot be updated." + properties: + hostNetwork: + description: hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork. + properties: + httpPort: + default: 80 + description: httpPort is the port on the host which should be used to listen for HTTP requests. This field should be set when port 80 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 80. 
+ format: int32 + maximum: 65535.0 + minimum: 0.0 + type: integer + httpsPort: + default: 443 + description: httpsPort is the port on the host which should be used to listen for HTTPS requests. This field should be set when port 443 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 443. + format: int32 + maximum: 65535.0 + minimum: 0.0 + type: integer + protocol: + description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n The following values are valid for this field: \n * The empty string. * \"TCP\". * \"PROXY\". \n The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change." + enum: + - '' + - TCP + - PROXY + type: string + statsPort: + default: 1936 + description: statsPort is the port on the host where the stats from the router are published. The value should not coincide with the NodePort range of the cluster. If an external load balancer is configured to forward connections to this IngressController, the load balancer should use this port for health checks. 
The load balancer can send HTTP probes on this port on a given node, with the path /healthz/ready to determine if the ingress controller is ready to receive traffic on the node. For proper operation the load balancer must not forward traffic to a node until the health check reports ready. The load balancer should also stop forwarding requests within a maximum of 45 seconds after /healthz/ready starts reporting not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with a threshold of two successful or failed requests to become healthy or unhealthy respectively, are well-tested values. When the value is 0 or is not specified it defaults to 1936. + format: int32 + maximum: 65535.0 + minimum: 0.0 + type: integer + type: object + loadBalancer: + description: loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService. + properties: + allowedSourceRanges: + description: "allowedSourceRanges specifies an allowlist of IP address ranges to which access to the load balancer should be restricted. Each range must be specified using CIDR notation (e.g. \"10.0.0.0/8\" or \"fd00::/8\"). If no range is specified, \"0.0.0.0/0\" for IPv4 and \"::/0\" for IPv6 are used by default, which allows all source addresses. \n To facilitate migration from earlier versions of OpenShift that did not have the allowedSourceRanges field, you may set the service.beta.kubernetes.io/load-balancer-source-ranges annotation on the \"router-\" service in the \"openshift-ingress\" namespace, and this annotation will take effect if allowedSourceRanges is empty on OpenShift 4.12." + items: + description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
+ pattern: (^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + type: string + nullable: true + type: array + dnsManagementPolicy: + default: Managed + description: 'dnsManagementPolicy indicates if the lifecycle of the wildcard DNS record associated with the load balancer service will be managed by the ingress operator. It defaults to Managed. Valid values are: Managed and Unmanaged.' + enum: + - Managed + - Unmanaged + type: string + providerParameters: + description: "providerParameters holds desired load balancer information specific to the underlying infrastructure provider. \n If empty, defaults will be applied. See specific providerParameters fields for details about their defaults." + properties: + aws: + description: "aws provides configuration settings that are specific to AWS load balancers. \n If empty, defaults will be applied. 
See specific aws fields for details about their defaults." + properties: + classicLoadBalancer: + description: classicLoadBalancerParameters holds configuration parameters for an AWS classic load balancer. Present only if type is Classic. + properties: + connectionIdleTimeout: + description: connectionIdleTimeout specifies the maximum time period that a connection may be idle before the load balancer closes the connection. The value must be parseable as a time duration value; see . A nil or zero value means no opinion, in which case a default value is used. The default value for this field is 60s. This default is subject to change. + format: duration + type: string + type: object + networkLoadBalancer: + description: networkLoadBalancerParameters holds configuration parameters for an AWS network load balancer. Present only if type is NLB. + type: object + type: + description: "type is the type of AWS load balancer to instantiate for an ingresscontroller. \n Valid values are: \n * \"Classic\": A Classic Load Balancer that makes routing decisions at either the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb \n * \"NLB\": A Network Load Balancer that makes routing decisions at the transport layer (TCP/SSL). See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb" + enum: + - Classic + - NLB + type: string + required: + - type + type: object + gcp: + description: "gcp provides configuration settings that are specific to GCP load balancers. \n If empty, defaults will be applied. See specific gcp fields for details about their defaults." + properties: + clientAccess: + description: "clientAccess describes how client access is restricted for internal load balancers. 
\n Valid values are: * \"Global\": Specifying an internal load balancer with Global client access allows clients from any region within the VPC to communicate with the load balancer. \n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access \n * \"Local\": Specifying an internal load balancer with Local client access means only clients within the same region (and VPC) as the GCP load balancer can communicate with the load balancer. Note that this is the default behavior. \n https://cloud.google.com/load-balancing/docs/internal#client_access" + enum: + - Global + - Local + type: string + type: object + ibm: + description: "ibm provides configuration settings that are specific to IBM Cloud load balancers. \n If empty, defaults will be applied. See specific ibm fields for details about their defaults." + properties: + protocol: + description: "protocol specifies whether the load balancer uses PROXY protocol to forward connections to the IngressController. See \"service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features: \"proxy-protocol\"\" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas\" \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n Valid values for protocol are TCP, PROXY and omitted. 
When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is TCP, without the proxy protocol enabled." + enum: + - '' + - TCP + - PROXY + type: string + type: object + type: + description: type is the underlying infrastructure provider for the load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix", "OpenStack", and "VSphere". + enum: + - AWS + - Azure + - BareMetal + - GCP + - Nutanix + - OpenStack + - VSphere + - IBM + type: string + required: + - type + type: object + scope: + description: scope indicates the scope at which the load balancer is exposed. Possible values are "External" and "Internal". + enum: + - Internal + - External + type: string + required: + - dnsManagementPolicy + - scope + type: object + nodePort: + description: nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService. + properties: + protocol: + description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n The following values are valid for this field: \n * The empty string. * \"TCP\". * \"PROXY\". 
\n The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change." + enum: + - '' + - TCP + - PROXY + type: string + type: object + private: + description: private holds parameters for the Private endpoint publishing strategy. Present only if type is Private. + properties: + protocol: + description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n The following values are valid for this field: \n * The empty string. * \"TCP\". * \"PROXY\". \n The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change." + enum: + - '' + - TCP + - PROXY + type: string + type: object + type: + description: "type is the publishing strategy to use. Valid values are: \n * LoadBalancerService \n Publishes the ingress controller using a Kubernetes LoadBalancer Service. \n In this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment. \n See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer \n If domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. 
DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone. \n Wildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms. \n * HostNetwork \n Publishes the ingress controller on node ports where the ingress controller is deployed. \n In this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports. \n * Private \n Does not publish the ingress controller. \n In this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller. \n * NodePortService \n Publishes the ingress controller using a Kubernetes NodePort Service. \n In this configuration, the ingress controller deployment uses container networking. A NodePort Service is created to publish the deployment. The specific node ports are dynamically allocated by OpenShift; however, to support static port allocations, user changes to the node port field of the managed NodePort Service will preserved." + enum: + - LoadBalancerService + - HostNetwork + - Private + - NodePortService + type: string + required: + - type + type: object + httpCompression: + description: httpCompression defines a policy for HTTP traffic compression. By default, there is no HTTP compression. + properties: + mimeTypes: + description: "mimeTypes is a list of MIME types that should have compression applied. This list can be empty, in which case the ingress controller does not apply compression. \n Note: Not all MIME types benefit from compression, but HAProxy will still use resources to try to compress if instructed to. Generally speaking, text (html, css, js, etc.) formats benefit from compression, but formats that are already compressed (image, audio, video, etc.) 
benefit little in exchange for the time and cpu spent on compressing again. See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2" + items: + description: "CompressionMIMEType defines the format of a single MIME type. E.g. \"text/css; charset=utf-8\", \"text/html\", \"text/*\", \"image/svg+xml\", \"application/octet-stream\", \"X-custom/customsub\", etc. \n The format should follow the Content-Type definition in RFC 1341: Content-Type := type \"/\" subtype *[\";\" parameter] - The type in Content-Type can be one of: application, audio, image, message, multipart, text, video, or a custom type preceded by \"X-\" and followed by a token as defined below. - The token is a string of at least one character, and not containing white space, control characters, or any of the characters in the tspecials set. - The tspecials set contains the characters ()<>@,;:\\\"/[]?.= - The subtype in Content-Type is also a token. - The optional parameter/s following the subtype are defined as: token \"=\" (token / quoted-string) - The quoted-string, as defined in RFC 822, is surrounded by double quotes and can contain white space plus any character EXCEPT \\, \", and CR. It can also contain any single ASCII character as long as it is escaped by \\." + pattern: ^(?i)(x-[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|application|audio|image|message|multipart|text|video)/[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+(; *[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+=([^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|"(\\[\x00-\x7F]|[^\x0D"\\])*"))*$ + type: string + type: array + x-kubernetes-list-type: set + type: object + httpEmptyRequestsPolicy: + default: Respond + description: "httpEmptyRequestsPolicy describes how HTTP connections should be handled if the connection times out before a request is received. Allowed values for this field are \"Respond\" and \"Ignore\". 
If the field is set to \"Respond\", the ingress controller sends an HTTP 400 or 408 response, logs the connection (if access logging is enabled), and counts the connection in the appropriate metrics. If the field is set to \"Ignore\", the ingress controller closes the connection without sending a response, logging the connection, or incrementing metrics. The default value is \"Respond\". \n Typically, these connections come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\") and can be safely ignored. However, these requests may also be caused by network errors, and so setting this field to \"Ignore\" may impede detection and diagnosis of problems. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts." + enum: + - Respond + - Ignore + type: string + httpErrorCodePages: + description: httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format "error-page-.http", where is an HTTP error code. For example, "error-page-503.http" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + httpHeaders: + description: "httpHeaders defines policy for HTTP headers. \n If this field is empty, the default values are used." + properties: + actions: + description: 'actions specifies options for modifying headers and their values. 
Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController''s spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route''s spec.httpHeaders.actions field. Headers set using this API cannot be captured for use in access logs. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details.' + properties: + request: + description: 'request is a list of HTTP request headers to modify. Actions defined here will modify the request headers of all requests passing through an ingress controller. These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster. 
IngressController actions for request headers will be executed before Route actions. Currently, actions may define to either `Set` or `Delete` headers values. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. Sample fetchers allowed are "req.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]".' + items: + description: IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header. + properties: + action: + description: action specifies actions to perform on headers, such as setting or deleting headers. + properties: + set: + description: set specifies how the HTTP header should be set. This field is required when type is Set and forbidden otherwise. + properties: + value: + description: value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + maxLength: 16384 + minLength: 1 + type: string + required: + - value + type: object + type: + description: type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. 
+ enum: + - Set + - Delete + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: set is required when type is Set, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Set'' ? has(self.set) : !has(self.set)' + name: + description: 'name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&''*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.' + maxLength: 255 + minLength: 1 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + x-kubernetes-validations: + - message: strict-transport-security header may not be modified via header actions + rule: self.lowerAscii() != 'strict-transport-security' + - message: proxy header may not be modified via header actions + rule: self.lowerAscii() != 'proxy' + - message: host header may not be modified via header actions + rule: self.lowerAscii() != 'host' + - message: cookie header may not be modified via header actions + rule: self.lowerAscii() != 'cookie' + - message: set-cookie header may not be modified via header actions + rule: self.lowerAscii() != 'set-cookie' + required: + - action + - name + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. 
The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64. + rule: self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$'))) + response: + description: 'response is a list of HTTP response headers to modify. Actions defined here will modify the response headers of all requests passing through an ingress controller. These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster. IngressController actions for response headers will be executed after Route actions. Currently, actions may define to either `Set` or `Delete` headers values. Actions are applied in sequence as defined in this list. A maximum of 20 response header actions may be configured. Sample fetchers allowed are "res.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]".' + items: + description: IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header. + properties: + action: + description: action specifies actions to perform on headers, such as setting or deleting headers. + properties: + set: + description: set specifies how the HTTP header should be set. This field is required when type is Set and forbidden otherwise. + properties: + value: + description: value specifies a header value. Dynamic values can be added. 
The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + maxLength: 16384 + minLength: 1 + type: string + required: + - value + type: object + type: + description: type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + enum: + - Set + - Delete + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: set is required when type is Set, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Set'' ? has(self.set) : !has(self.set)' + name: + description: 'name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&''*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.' 
+ maxLength: 255 + minLength: 1 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + x-kubernetes-validations: + - message: strict-transport-security header may not be modified via header actions + rule: self.lowerAscii() != 'strict-transport-security' + - message: proxy header may not be modified via header actions + rule: self.lowerAscii() != 'proxy' + - message: host header may not be modified via header actions + rule: self.lowerAscii() != 'host' + - message: cookie header may not be modified via header actions + rule: self.lowerAscii() != 'cookie' + - message: set-cookie header may not be modified via header actions + rule: self.lowerAscii() != 'set-cookie' + required: + - action + - name + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64. + rule: self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$'))) + type: object + forwardedHeaderPolicy: + description: "forwardedHeaderPolicy specifies when and how the IngressController sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version HTTP headers. 
The value may be one of the following: \n * \"Append\", which specifies that the IngressController appends the headers, preserving existing headers. \n * \"Replace\", which specifies that the IngressController sets the headers, replacing any existing Forwarded or X-Forwarded-* headers. \n * \"IfNone\", which specifies that the IngressController sets the headers if they are not already set. \n * \"Never\", which specifies that the IngressController never sets the headers, preserving any existing headers. \n By default, the policy is \"Append\"." + enum: + - Append + - Replace + - IfNone + - Never + type: string + headerNameCaseAdjustments: + description: "headerNameCaseAdjustments specifies case adjustments that can be applied to HTTP header names. Each adjustment is specified as an HTTP header name with the desired capitalization. For example, specifying \"X-Forwarded-For\" indicates that the \"x-forwarded-for\" HTTP header should be adjusted to have the specified capitalization. \n These adjustments are only applied to cleartext, edge-terminated, and re-encrypt routes, and only when using HTTP/1. \n For request headers, these adjustments are applied only for routes that have the haproxy.router.openshift.io/h1-adjust-case=true annotation. For response headers, these adjustments are applied to all HTTP responses. \n If this field is empty, no request headers are adjusted." + items: + description: IngressControllerHTTPHeaderNameCaseAdjustment is the name of an HTTP header (for example, "X-Forwarded-For") in the desired capitalization. The value must be a valid HTTP header name as defined in RFC 2616 section 4.2. + maxLength: 1024 + minLength: 0 + pattern: ^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + nullable: true + type: array + uniqueId: + description: "uniqueId describes configuration for a custom HTTP header that the ingress controller should inject into incoming HTTP requests. 
Typically, this header is configured to have a value that is unique to the HTTP request. The header can be used by applications or included in access logs to facilitate tracing individual HTTP requests. \n If this field is empty, no such header is injected into requests." + properties: + format: + description: 'format specifies the format for the injected HTTP header''s value. This field has no effect unless name is specified. For the HAProxy-based ingress controller implementation, this format uses the same syntax as the HTTP log format. If the field is empty, the default value is "%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid"; see the corresponding HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3' + maxLength: 1024 + minLength: 0 + pattern: ^(%(%|(\{[-+]?[QXE](,[-+]?[QXE])*\})?([A-Za-z]+|\[[.0-9A-Z_a-z]+(\([^)]+\))?(,[.0-9A-Z_a-z]+(\([^)]+\))?)*\]))|[^%[:cntrl:]])*$ + type: string + name: + description: name specifies the name of the HTTP header (for example, "unique-id") that the ingress controller should inject into HTTP requests. The field's value must be a valid HTTP header name as defined in RFC 2616 section 4.2. If the field is empty, no header is injected. + maxLength: 1024 + minLength: 0 + pattern: ^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + type: object + type: object + logging: + description: logging defines parameters for what should be logged where. If this field is empty, operational logs are enabled but access logs are disabled. + properties: + access: + description: "access describes how the client requests should be logged. \n If this field is empty, access logging is disabled." + properties: + destination: + description: destination is where access logs go. + properties: + container: + description: container holds parameters for the Container logging destination. Present only if type is Container. + properties: + maxLength: + default: 1024 + description: "maxLength is the maximum length of the log message. 
\n Valid values are integers in the range 480 to 8192, inclusive. \n When omitted, the default value is 1024." + format: int32 + maximum: 8192.0 + minimum: 480.0 + type: integer + type: object + syslog: + description: syslog holds parameters for a syslog endpoint. Present only if type is Syslog. + oneOf: + - properties: + address: + format: ipv4 + - properties: + address: + format: ipv6 + properties: + address: + description: address is the IP address of the syslog endpoint that receives log messages. + type: string + facility: + description: "facility specifies the syslog facility of log messages. \n If this field is empty, the facility is \"local1\"." + enum: + - kern + - user + - mail + - daemon + - auth + - syslog + - lpr + - news + - uucp + - cron + - auth2 + - ftp + - ntp + - audit + - alert + - cron2 + - local0 + - local1 + - local2 + - local3 + - local4 + - local5 + - local6 + - local7 + type: string + maxLength: + default: 1024 + description: "maxLength is the maximum length of the log message. \n Valid values are integers in the range 480 to 4096, inclusive. \n When omitted, the default value is 1024." + format: int32 + maximum: 4096.0 + minimum: 480.0 + type: integer + port: + description: port is the UDP port number of the syslog endpoint that receives log messages. + format: int32 + maximum: 65535.0 + minimum: 1.0 + type: integer + required: + - address + - port + type: object + type: + description: "type is the type of destination for logs. It must be one of the following: \n * Container \n The ingress operator configures the sidecar container named \"logs\" on the ingress controller pod and configures the ingress controller to write logs to the sidecar. The logs are then available as container logs. The expectation is that the administrator configures a custom logging solution that reads logs from this sidecar. 
Note that using container logs means that logs may be dropped if the rate of logs exceeds the container runtime's or the custom logging solution's capacity. \n * Syslog \n Logs are sent to a syslog endpoint. The administrator must specify an endpoint that can receive syslog messages. The expectation is that the administrator has configured a custom syslog instance." + enum: + - Container + - Syslog + type: string + required: + - type + type: object + httpCaptureCookies: + description: httpCaptureCookies specifies HTTP cookies that should be captured in access logs. If this field is empty, no cookies are captured. + items: + description: IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be captured. + properties: + matchType: + description: matchType specifies the type of match to be performed on the cookie name. Allowed values are "Exact" for an exact string match and "Prefix" for a string prefix match. If "Exact" is specified, a name must be specified in the name field. If "Prefix" is provided, a prefix must be specified in the namePrefix field. For example, specifying matchType "Prefix" and namePrefix "foo" will capture a cookie named "foo" or "foobar" but not one named "bar". The first matching cookie is captured. + enum: + - Exact + - Prefix + type: string + maxLength: + description: maxLength specifies a maximum length of the string that will be logged, which includes the cookie name, cookie value, and one-character delimiter. If the log entry exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request. + maximum: 1024.0 + minimum: 1.0 + type: integer + name: + description: name specifies a cookie name. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1. 
+ maxLength: 1024 + minLength: 0 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$ + type: string + namePrefix: + description: namePrefix specifies a cookie name prefix. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1. + maxLength: 1024 + minLength: 0 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$ + type: string + required: + - matchType + - maxLength + type: object + maxItems: 1 + nullable: true + type: array + httpCaptureHeaders: + description: "httpCaptureHeaders defines HTTP headers that should be captured in access logs. If this field is empty, no headers are captured. \n Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be captured for TLS passthrough connections." + properties: + request: + description: "request specifies which HTTP request headers to capture. \n If this field is empty, no request headers are captured." + items: + description: IngressControllerCaptureHTTPHeader describes an HTTP header that should be captured. + properties: + maxLength: + description: maxLength specifies a maximum length for the header value. If a header value exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request. + minimum: 1.0 + type: integer + name: + description: name specifies a header name. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + required: + - maxLength + - name + type: object + nullable: true + type: array + response: + description: "response specifies which HTTP response headers to capture. \n If this field is empty, no response headers are captured." + items: + description: IngressControllerCaptureHTTPHeader describes an HTTP header that should be captured. 
+ properties: + maxLength: + description: maxLength specifies a maximum length for the header value. If a header value exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request. + minimum: 1.0 + type: integer + name: + description: name specifies a header name. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + required: + - maxLength + - name + type: object + nullable: true + type: array + type: object + httpLogFormat: + description: "httpLogFormat specifies the format of the log message for an HTTP request. \n If this field is empty, log messages use the implementation's default HTTP log format. For HAProxy's default HTTP log format, see the HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 \n Note that this format only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). It does not affect the log format for TLS passthrough connections." + type: string + logEmptyRequests: + default: Log + description: logEmptyRequests specifies how connections on which no request is received should be logged. Typically, these empty requests come from load balancers' health probes or Web browsers' speculative connections ("preconnect"), in which case logging these requests may be undesirable. However, these requests may also be caused by network errors, in which case logging empty requests may be useful for diagnosing the errors. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts. Allowed values for this field are "Log" and "Ignore". The default value is "Log". 
+ enum: + - Log + - Ignore + type: string + required: + - destination + type: object + type: object + namespaceSelector: + description: "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards. \n If unset, the default is no filtering." + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nodePlacement: + description: "nodePlacement enables explicit control over the scheduling of the ingress controller. \n If unset, defaults are used. See NodePlacement for more details." + properties: + nodeSelector: + description: "nodeSelector is the node selector applied to ingress controller deployments. \n If set, the specified selector is used and replaces the default. 
\n If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status. \n When defaultPlacement is Workers, the default is: \n kubernetes.io/os: linux node-role.kubernetes.io/worker: '' \n When defaultPlacement is ControlPlane, the default is: \n kubernetes.io/os: linux node-role.kubernetes.io/master: '' \n These defaults are subject to change. \n Note that using nodeSelector.matchExpressions is not supported. Only nodeSelector.matchLabels may be used. This is a limitation of the Kubernetes API: the pod spec does not allow complex expressions for node selectors." + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + tolerations: + description: "tolerations is a list of tolerations applied to ingress controller deployments. \n The default is an empty list. \n See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/" + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + replicas: + description: "replicas is the desired number of ingress controller replicas. If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status. 
\n The value of replicas is set based on the value of a chosen field in the Infrastructure CR. If defaultPlacement is set to ControlPlane, the chosen field will be controlPlaneTopology. If it is set to Workers the chosen field will be infrastructureTopology. Replicas will then be set to 1 or 2 based whether the chosen field's value is SingleReplica or HighlyAvailable, respectively. \n These defaults are subject to change." + format: int32 + type: integer + routeAdmission: + description: "routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces). \n If empty, defaults will be applied. See specific routeAdmission fields for details about their defaults." + properties: + namespaceOwnership: + description: "namespaceOwnership describes how host name claims across namespaces should be handled. \n Value must be one of: \n - Strict: Do not allow routes in different namespaces to claim the same host. \n - InterNamespaceAllowed: Allow routes to claim different paths of the same host name across namespaces. \n If empty, the default is Strict." + enum: + - InterNamespaceAllowed + - Strict + type: string + wildcardPolicy: + description: "wildcardPolicy describes how routes with wildcard policies should be handled for the ingress controller. WildcardPolicy controls use of routes [1] exposed by the ingress controller based on the route's wildcard policy. \n [1] https://github.com/openshift/api/blob/master/route/v1/types.go \n Note: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed will cause admitted routes with a wildcard policy of Subdomain to stop working. These routes must be updated to a wildcard policy of None to be readmitted by the ingress controller. \n WildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values. \n If empty, defaults to \"WildcardsDisallowed\"." 
+ enum: + - WildcardsAllowed + - WildcardsDisallowed + type: string + type: object + routeSelector: + description: "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards. \n If unset, the default is no filtering." + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers. \n If unset, the default is based on the apiservers.config.openshift.io/cluster resource. \n Note that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. 
For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout." + properties: + custom: + description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1" + nullable: true + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + items: + type: string + type: array + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. 
For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + type: string + type: object + intermediate: + description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2" + nullable: true + type: object + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." 
+ nullable: true + type: object + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + nullable: true + type: object + type: + description: "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations \n The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced. \n Note that the Modern profile is currently not supported because it is not yet well adopted by common software libraries." 
+ enum: + - Old + - Intermediate + - Modern + - Custom + type: string + type: object + tuningOptions: + anyOf: + - properties: + maxConnections: + enum: + - -1 + - 0 + - properties: + maxConnections: + format: int32 + maximum: 2000000.0 + minimum: 2000.0 + description: "tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details. \n Setting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations." + properties: + clientFinTimeout: + description: "clientFinTimeout defines how long a connection will be held open while waiting for the client response to the server/backend closing the connection. \n If unset, the default timeout is 1s" + format: duration + type: string + clientTimeout: + description: "clientTimeout defines how long a connection will be held open while waiting for a client response. \n If unset, the default timeout is 30s" + format: duration + type: string + headerBufferBytes: + description: "headerBufferBytes describes how much memory should be reserved (in bytes) for IngressController connection sessions. Note that this value must be at least 16384 if HTTP/2 is enabled for the IngressController (https://tools.ietf.org/html/rfc7540). If this field is empty, the IngressController will use a default value of 32768 bytes. \n Setting this field is generally not recommended as headerBufferBytes values that are too small may break the IngressController and headerBufferBytes values that are too large could cause the IngressController to use significantly more memory than necessary." 
+ format: int32 + minimum: 16384.0 + type: integer + headerBufferMaxRewriteBytes: + description: "headerBufferMaxRewriteBytes describes how much memory should be reserved (in bytes) from headerBufferBytes for HTTP header rewriting and appending for IngressController connection sessions. Note that incoming HTTP requests will be limited to (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning headerBufferBytes must be greater than headerBufferMaxRewriteBytes. If this field is empty, the IngressController will use a default value of 8192 bytes. \n Setting this field is generally not recommended as headerBufferMaxRewriteBytes values that are too small may break the IngressController and headerBufferMaxRewriteBytes values that are too large could cause the IngressController to use significantly more memory than necessary." + format: int32 + minimum: 4096.0 + type: integer + healthCheckInterval: + description: "healthCheckInterval defines how long the router waits between two consecutive health checks on its configured backends. This value is applied globally as a default for all routes, but may be overridden per-route by the route annotation \"router.openshift.io/haproxy.health.check.interval\". \n Expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\". \n Setting this to less than 5s can cause excess traffic due to too frequent TCP health checks and accompanying SYN packet storms. Alternatively, setting this too high can result in increased latency, due to backend servers that are no longer available, but haven't yet been detected as such. \n An empty or zero healthCheckInterval means no opinion and IngressController chooses a default, which is subject to change over time. Currently the default healthCheckInterval value is 5s. 
\n Currently the minimum allowed value is 1s and the maximum allowed value is 2147483647ms (24.85 days). Both are subject to change over time." + pattern: ^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ + type: string + maxConnections: + description: "maxConnections defines the maximum number of simultaneous connections that can be established per HAProxy process. Increasing this value allows each ingress controller pod to handle more connections but at the cost of additional system resources being consumed. \n Permitted values are: empty, 0, -1, and the range 2000-2000000. \n If this field is empty or 0, the IngressController will use the default value of 50000, but the default is subject to change in future releases. \n If the value is -1 then HAProxy will dynamically compute a maximum value based on the available ulimits in the running container. Selecting -1 (i.e., auto) will result in a large value being computed (~520000 on OpenShift >=4.10 clusters) and therefore each HAProxy process will incur significant memory usage compared to the current default of 50000. \n Setting a value that is greater than the current operating system limit will prevent the HAProxy process from starting. \n If you choose a discrete value (e.g., 750000) and the router pod is migrated to a new node, there's no guarantee that that new node has identical ulimits configured. In such a scenario the pod would fail to start. If you have nodes with different ulimits configured (e.g., different tuned profiles) and you choose a discrete value then the guidance is to use -1 and let the value be computed dynamically at runtime. \n You can monitor memory usage for router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}'. 
\n You can monitor memory usage of individual HAProxy processes in router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}/container_processes{container=\"router\",namespace=\"openshift-ingress\"}'." + format: int32 + type: integer + reloadInterval: + description: "reloadInterval defines the minimum interval at which the router is allowed to reload to accept new changes. Increasing this value can prevent the accumulation of HAProxy processes, depending on the scenario. Increasing this interval can also lessen load imbalance on a backend's servers when using the roundrobin balancing algorithm. Alternatively, decreasing this value may decrease latency since updates to HAProxy's configuration can take effect more quickly. \n The value must be a time duration value; see . Currently, the minimum value allowed is 1s, and the maximum allowed value is 120s. Minimum and maximum allowed values may change in future versions of OpenShift. Note that if a duration outside of these bounds is provided, the value of reloadInterval will be capped/floored and not rejected (e.g. a duration of over 120s will be capped to 120s; the IngressController will not reject and replace this disallowed value with the default). \n A zero value for reloadInterval tells the IngressController to choose the default, which is currently 5s and subject to change without notice. \n This field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\". \n Note: Setting a value significantly larger than the default of 5s can cause latency in observing updates to routes and their endpoints. HAProxy's configuration will be reloaded less frequently, and newly created routes will not be served until the subsequent reload." 
+ pattern: ^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ + type: string + serverFinTimeout: + description: "serverFinTimeout defines how long a connection will be held open while waiting for the server/backend response to the client closing the connection. \n If unset, the default timeout is 1s" + format: duration + type: string + serverTimeout: + description: "serverTimeout defines how long a connection will be held open while waiting for a server/backend response. \n If unset, the default timeout is 30s" + format: duration + type: string + threadCount: + description: "threadCount defines the number of threads created per HAProxy process. Creating more threads allows each ingress controller pod to handle more connections, at the cost of more system resources being used. HAProxy currently supports up to 64 threads. If this field is empty, the IngressController will use the default value. The current default is 4 threads, but this may change in future releases. \n Setting this field is generally not recommended. Increasing the number of HAProxy threads allows ingress controller pods to utilize more CPU time under load, potentially starving other pods if set too high. Reducing the number of threads may cause the ingress controller to perform poorly." + format: int32 + maximum: 64.0 + minimum: 1.0 + type: integer + tlsInspectDelay: + description: "tlsInspectDelay defines how long the router can hold data to find a matching route. \n Setting this too short can cause the router to fall back to the default certificate for edge-terminated or reencrypt routes even when a better matching certificate could be used. \n If unset, the default inspect delay is 5s" + format: duration + type: string + tunnelTimeout: + description: "tunnelTimeout defines how long a tunnel connection (including websockets) will be held open while the tunnel is idle. 
\n If unset, the default timeout is 1h" + format: duration + type: string + type: object + unsupportedConfigOverrides: + description: unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the IngressController. + properties: + availableReplicas: + description: availableReplicas is number of observed available replicas according to the ingress controller deployment. + format: int32 + type: integer + conditions: + description: "conditions is a list of conditions and their status. \n Available means the ingress controller deployment is available and servicing route and ingress resources (i.e, .status.availableReplicas equals .spec.replicas) \n There are additional conditions which indicate the status of other ingress controller features and capabilities. \n * LoadBalancerManaged - True if the following conditions are met: * The endpoint publishing strategy requires a service load balancer. - False if any of those conditions are unsatisfied. \n * LoadBalancerReady - True if the following conditions are met: * A load balancer is managed. * The load balancer is ready. - False if any of those conditions are unsatisfied. \n * DNSManaged - True if the following conditions are met: * The endpoint publishing strategy and platform support DNS. * The ingress controller domain is set. * dns.config.openshift.io/cluster configures DNS zones. - False if any of those conditions are unsatisfied. \n * DNSReady - True if the following conditions are met: * DNS is managed. * DNS records have been successfully created. - False if any of those conditions are unsatisfied." + items: + description: OperatorCondition is just the standard condition fields. 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + domain: + description: domain is the actual domain in use. + type: string + endpointPublishingStrategy: + description: endpointPublishingStrategy is the actual strategy in use. + properties: + hostNetwork: + description: hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork. + properties: + httpPort: + default: 80 + description: httpPort is the port on the host which should be used to listen for HTTP requests. This field should be set when port 80 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 80. + format: int32 + maximum: 65535.0 + minimum: 0.0 + type: integer + httpsPort: + default: 443 + description: httpsPort is the port on the host which should be used to listen for HTTPS requests. This field should be set when port 443 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 443. + format: int32 + maximum: 65535.0 + minimum: 0.0 + type: integer + protocol: + description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. 
Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n The following values are valid for this field: \n * The empty string. * \"TCP\". * \"PROXY\". \n The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change." + enum: + - '' + - TCP + - PROXY + type: string + statsPort: + default: 1936 + description: statsPort is the port on the host where the stats from the router are published. The value should not coincide with the NodePort range of the cluster. If an external load balancer is configured to forward connections to this IngressController, the load balancer should use this port for health checks. The load balancer can send HTTP probes on this port on a given node, with the path /healthz/ready to determine if the ingress controller is ready to receive traffic on the node. For proper operation the load balancer must not forward traffic to a node until the health check reports ready. The load balancer should also stop forwarding requests within a maximum of 45 seconds after /healthz/ready starts reporting not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with a threshold of two successful or failed requests to become healthy or unhealthy respectively, are well-tested values. When the value is 0 or is not specified it defaults to 1936. + format: int32 + maximum: 65535.0 + minimum: 0.0 + type: integer + type: object + loadBalancer: + description: loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService. + properties: + allowedSourceRanges: + description: "allowedSourceRanges specifies an allowlist of IP address ranges to which access to the load balancer should be restricted. 
Each range must be specified using CIDR notation (e.g. \"10.0.0.0/8\" or \"fd00::/8\"). If no range is specified, \"0.0.0.0/0\" for IPv4 and \"::/0\" for IPv6 are used by default, which allows all source addresses. \n To facilitate migration from earlier versions of OpenShift that did not have the allowedSourceRanges field, you may set the service.beta.kubernetes.io/load-balancer-source-ranges annotation on the \"router-\" service in the \"openshift-ingress\" namespace, and this annotation will take effect if allowedSourceRanges is empty on OpenShift 4.12." + items: + description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). + pattern: (^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + type: string + nullable: true + type: array + dnsManagementPolicy: + default: Managed + description: 
'dnsManagementPolicy indicates if the lifecycle of the wildcard DNS record associated with the load balancer service will be managed by the ingress operator. It defaults to Managed. Valid values are: Managed and Unmanaged.' + enum: + - Managed + - Unmanaged + type: string + providerParameters: + description: "providerParameters holds desired load balancer information specific to the underlying infrastructure provider. \n If empty, defaults will be applied. See specific providerParameters fields for details about their defaults." + properties: + aws: + description: "aws provides configuration settings that are specific to AWS load balancers. \n If empty, defaults will be applied. See specific aws fields for details about their defaults." + properties: + classicLoadBalancer: + description: classicLoadBalancerParameters holds configuration parameters for an AWS classic load balancer. Present only if type is Classic. + properties: + connectionIdleTimeout: + description: connectionIdleTimeout specifies the maximum time period that a connection may be idle before the load balancer closes the connection. The value must be parseable as a time duration value; see . A nil or zero value means no opinion, in which case a default value is used. The default value for this field is 60s. This default is subject to change. + format: duration + type: string + type: object + networkLoadBalancer: + description: networkLoadBalancerParameters holds configuration parameters for an AWS network load balancer. Present only if type is NLB. + type: object + type: + description: "type is the type of AWS load balancer to instantiate for an ingresscontroller. \n Valid values are: \n * \"Classic\": A Classic Load Balancer that makes routing decisions at either the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). 
See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb \n * \"NLB\": A Network Load Balancer that makes routing decisions at the transport layer (TCP/SSL). See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb" + enum: + - Classic + - NLB + type: string + required: + - type + type: object + gcp: + description: "gcp provides configuration settings that are specific to GCP load balancers. \n If empty, defaults will be applied. See specific gcp fields for details about their defaults." + properties: + clientAccess: + description: "clientAccess describes how client access is restricted for internal load balancers. \n Valid values are: * \"Global\": Specifying an internal load balancer with Global client access allows clients from any region within the VPC to communicate with the load balancer. \n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access \n * \"Local\": Specifying an internal load balancer with Local client access means only clients within the same region (and VPC) as the GCP load balancer can communicate with the load balancer. Note that this is the default behavior. \n https://cloud.google.com/load-balancing/docs/internal#client_access" + enum: + - Global + - Local + type: string + type: object + ibm: + description: "ibm provides configuration settings that are specific to IBM Cloud load balancers. \n If empty, defaults will be applied. See specific ibm fields for details about their defaults." + properties: + protocol: + description: "protocol specifies whether the load balancer uses PROXY protocol to forward connections to the IngressController. 
See \"service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features: \"proxy-protocol\"\" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas\" \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n Valid values for protocol are TCP, PROXY and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is TCP, without the proxy protocol enabled." + enum: + - '' + - TCP + - PROXY + type: string + type: object + type: + description: type is the underlying infrastructure provider for the load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix", "OpenStack", and "VSphere". + enum: + - AWS + - Azure + - BareMetal + - GCP + - Nutanix + - OpenStack + - VSphere + - IBM + type: string + required: + - type + type: object + scope: + description: scope indicates the scope at which the load balancer is exposed. Possible values are "External" and "Internal". + enum: + - Internal + - External + type: string + required: + - dnsManagementPolicy + - scope + type: object + nodePort: + description: nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService. 
+ properties: + protocol: + description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n The following values are valid for this field: \n * The empty string. * \"TCP\". * \"PROXY\". \n The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change." + enum: + - '' + - TCP + - PROXY + type: string + type: object + private: + description: private holds parameters for the Private endpoint publishing strategy. Present only if type is Private. + properties: + protocol: + description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. 
See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n The following values are valid for this field: \n * The empty string. * \"TCP\". * \"PROXY\". \n The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change." + enum: + - '' + - TCP + - PROXY + type: string + type: object + type: + description: "type is the publishing strategy to use. Valid values are: \n * LoadBalancerService \n Publishes the ingress controller using a Kubernetes LoadBalancer Service. \n In this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment. \n See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer \n If domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone. \n Wildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms. \n * HostNetwork \n Publishes the ingress controller on node ports where the ingress controller is deployed. \n In this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports. \n * Private \n Does not publish the ingress controller. \n In this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller. \n * NodePortService \n Publishes the ingress controller using a Kubernetes NodePort Service. \n In this configuration, the ingress controller deployment uses container networking. A NodePort Service is created to publish the deployment. 
The specific node ports are dynamically allocated by OpenShift; however, to support static port allocations, user changes to the node port field of the managed NodePort Service will preserved." + enum: + - LoadBalancerService + - HostNetwork + - Private + - NodePortService + type: string + required: + - type + type: object + namespaceSelector: + description: namespaceSelector is the actual namespaceSelector in use. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + observedGeneration: + description: observedGeneration is the most recent generation observed. + format: int64 + type: integer + routeSelector: + description: routeSelector is the actual routeSelector in use. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + selector: + description: selector is a label selector, in string format, for ingress controller pods corresponding to the IngressController. The number of matching pods should equal the value of availableReplicas. + type: string + tlsProfile: + description: tlsProfile is the TLS connection configuration that is in effect. + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. 
For example, to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + items: + type: string + type: array + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + type: string + type: object + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.availableReplicas + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/insightsoperators.args b/crd-catalog/openshift/api/operator.openshift.io/v1/insightsoperators.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/insightsoperators.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/insightsoperators.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/insightsoperators.yaml new file mode 100644 index 000000000..308986dd1 --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/insightsoperators.yaml @@ -0,0 +1,260 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1237 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: insightsoperators.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: InsightsOperator + listKind: InsightsOperatorList + plural: insightsoperators + singular: insightsoperator + scope: Cluster + versions: + - 
name: v1 + schema: + openAPIV3Schema: + description: "InsightsOperator holds cluster-wide information about the Insights Operator. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the Insights. + properties: + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. 
It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Insights operator. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + gatherStatus: + description: gatherStatus provides basic information about the last Insights data gathering. When omitted, this means no data gathering has taken place yet. + properties: + gatherers: + description: gatherers is a list of active gatherers (and their statuses) in the last gathering. + items: + description: gathererStatus represents information about a particular data gatherer. 
+ properties: + conditions: + description: conditions provide details on the status of each gatherer. + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + lastGatherDuration: + description: lastGatherDuration represents the time spent gathering. + pattern: ^([1-9][0-9]*(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + name: + description: name is the name of the gatherer. + maxLength: 256 + minLength: 5 + type: string + required: + - conditions + - lastGatherDuration + - name + type: object + type: array + x-kubernetes-list-type: atomic + lastGatherDuration: + description: lastGatherDuration is the total time taken to process all gatherers during the last gather event. + pattern: ^0|([1-9][0-9]*(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: string + lastGatherTime: + description: lastGatherTime is the last time when Insights data gathering finished. An empty value means that no data has been gathered yet. + format: date-time + type: string + type: object + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + insightsReport: + description: insightsReport provides general Insights analysis results. When omitted, this means no data gathering has taken place yet. + properties: + downloadedAt: + description: downloadedAt is the time when the last Insights report was downloaded. An empty value means that there has not been any Insights report downloaded yet and it usually appears in disconnected clusters (or clusters when the Insights data gathering is disabled). + format: date-time + type: string + healthChecks: + description: healthChecks provides basic information about active Insights health checks in a cluster. + items: + description: healthCheck represents an Insights health check attributes. + properties: + advisorURI: + description: advisorURI provides the URL link to the Insights Advisor. + pattern: ^https:\/\/\S+ + type: string + description: + description: description provides basic description of the healtcheck. + maxLength: 2048 + minLength: 10 + type: string + state: + description: state determines what the current state of the health check is. Health check is enabled by default and can be disabled by the user in the Insights advisor user interface. + enum: + - Enabled + - Disabled + type: string + totalRisk: + description: totalRisk of the healthcheck. 
Indicator of the total risk posed by the detected issue; combination of impact and likelihood. The values can be from 1 to 4, and the higher the number, the more important the issue. + format: int32 + maximum: 4.0 + minimum: 1.0 + type: integer + required: + - advisorURI + - description + - state + - totalRisk + type: object + type: array + x-kubernetes-list-type: atomic + type: object + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.availableReplicas + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/kubeapiservers.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/kubeapiservers.yaml new file mode 100644 index 000000000..43efd4c8e --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/kubeapiservers.yaml @@ -0,0 +1,205 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: kubeapiservers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: KubeAPIServer + plural: kubeapiservers + singular: kubeapiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "KubeAPIServer provides information to configure an operator to manage 
kube-apiserver. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the Kubernetes API Server + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config. + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Kubernetes API Server + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across individual nodes + items: + description: NodeStatus provides information about the current state of a particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently successful deployment + format: int32 + type: integer + lastFailedCount: + description: lastFailedCount is how often the installer pod of the last failed revision failed. 
+ type: integer + lastFailedReason: + description: lastFailedReason is a machine readable failure reason string. + type: string + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision. + items: + type: string + type: array + lastFailedTime: + description: lastFailedTime is the time the last failed revision failed the last time. + format: date-time + type: string + lastFallbackCount: + description: lastFallbackCount is how often a fallback to a previous revision happened. + type: integer + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment we're trying to apply + format: int32 + type: integer + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + serviceAccountIssuers: + description: 'serviceAccountIssuers tracks history of used service account issuers. The item without expiration time represents the currently used service account issuer. The other items represents service account issuers that were used previously and are still being trusted. The default expiration for the items is set by the platform and it defaults to 24h. see: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection' + items: + properties: + expirationTime: + description: expirationTime is the time after which this service account issuer will be pruned and removed from the trusted list of service account issuers. 
+ format: date-time + type: string + name: + description: name is the name of the service account issuer --- + type: string + type: object + type: array + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/kubecontrollermanagers.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/kubecontrollermanagers.yaml new file mode 100644 index 000000000..85e4734b3 --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/kubecontrollermanagers.yaml @@ -0,0 +1,198 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: kubecontrollermanagers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: KubeControllerManager + plural: kubecontrollermanagers + singular: kubecontrollermanager + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "KubeControllerManager provides information to configure an operator to manage kube-controller-manager. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the Kubernetes Controller Manager + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config. + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. 
It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + useMoreSecureServiceCA: + default: false + description: useMoreSecureServiceCA indicates that the service-ca.crt provided in SA token volumes should include only enough certificates to validate service serving certificates. Once set to true, it cannot be set to false. Even if someone finds a way to set it back to false, the service-ca.crt files that previously existed will only have the more secure content. 
+ type: boolean + type: object + status: + description: status is the most recently observed status of the Kubernetes Controller Manager + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across individual nodes + items: + description: NodeStatus provides information about the current state of a 
particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently successful deployment + format: int32 + type: integer + lastFailedCount: + description: lastFailedCount is how often the installer pod of the last failed revision failed. + type: integer + lastFailedReason: + description: lastFailedReason is a machine readable failure reason string. + type: string + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision. + items: + type: string + type: array + lastFailedTime: + description: lastFailedTime is the time the last failed revision failed the last time. + format: date-time + type: string + lastFallbackCount: + description: lastFallbackCount is how often a fallback to a previous revision happened. 
+ type: integer + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment we're trying to apply + format: int32 + type: integer + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/kubeschedulers.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/kubeschedulers.yaml new file mode 100644 index 000000000..0fede6e8e --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/kubeschedulers.yaml @@ -0,0 +1,194 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: kubeschedulers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: KubeScheduler + plural: kubeschedulers + singular: kubescheduler + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "KubeScheduler provides information to configure an operator to manage scheduler. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the Kubernetes Scheduler + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config. + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Kubernetes Scheduler + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across individual nodes + items: + description: NodeStatus provides information about the current state of a particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently successful deployment + format: int32 + type: integer + lastFailedCount: + description: lastFailedCount is how often the installer pod of the last failed revision failed. 
+ type: integer + lastFailedReason: + description: lastFailedReason is a machine readable failure reason string. + type: string + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision. + items: + type: string + type: array + lastFailedTime: + description: lastFailedTime is the time the last failed revision failed the last time. + format: date-time + type: string + lastFallbackCount: + description: lastFallbackCount is how often a fallback to a previous revision happened. + type: integer + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment we're trying to apply + format: int32 + type: integer + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/kubestorageversionmigrators.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/kubestorageversionmigrators.yaml new file mode 100644 index 000000000..bcb6b2c4e --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/kubestorageversionmigrators.yaml @@ -0,0 +1,133 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/503 + 
include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: kubestorageversionmigrators.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: KubeStorageVersionMigrator + listKind: KubeStorageVersionMigratorList + plural: kubestorageversionmigrators + singular: kubestorageversionmigrator + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/machineconfigurations.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/machineconfigurations.yaml new file mode 100644 index 000000000..614edd1d5 --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/machineconfigurations.yaml @@ -0,0 
+1,192 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1453 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: machineconfigurations.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MachineConfiguration + plural: machineconfigurations + singular: machineconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "MachineConfiguration provides information to configure an operator to manage Machine Configuration. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the Machine Config Operator + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config. + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. 
\n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Machine Config Operator + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across individual nodes + items: + description: NodeStatus provides information about the current state of a particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently successful deployment + format: int32 + type: integer + lastFailedCount: + description: lastFailedCount is how often the installer pod of the last failed revision failed. + type: integer + lastFailedReason: + description: lastFailedReason is a machine readable failure reason string. + type: string + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision. 
+ items: + type: string + type: array + lastFailedTime: + description: lastFailedTime is the time the last failed revision failed the last time. + format: date-time + type: string + lastFallbackCount: + description: lastFallbackCount is how often a fallback to a previous revision happened. + type: integer + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment we're trying to apply + format: int32 + type: integer + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/networks.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/networks.yaml new file mode 100644 index 000000000..88f86f731 --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/networks.yaml @@ -0,0 +1,577 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: networks.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator. 
\n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSpec is the top-level network configuration object. + properties: + additionalNetworks: + description: additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled. + items: + description: AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one "Config" that matches the type. + properties: + name: + description: name is the name of the network. This will be populated in the resulting CRD This must be unique. + type: string + namespace: + description: namespace is the namespace of the network. This will be populated in the resulting CRD If not given the network will be created in the default namespace. 
+ type: string + rawCNIConfig: + description: rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD + type: string + simpleMacvlanConfig: + description: SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan + properties: + ipamConfig: + description: IPAMConfig configures IPAM module will be used for IP Address Management (IPAM). + properties: + staticIPAMConfig: + description: StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic + properties: + addresses: + description: Addresses configures IP address for the interface + items: + description: StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses + properties: + address: + description: Address is the IP address in CIDR format + type: string + gateway: + description: Gateway is IP inside of subnet to designate as the gateway + type: string + type: object + type: array + dns: + description: DNS configures DNS for the interface + properties: + domain: + description: Domain configures the domainname the local domain used for short hostname lookups + type: string + nameservers: + description: Nameservers points DNS servers for IP lookup + items: + type: string + type: array + search: + description: Search configures priority ordered search domains for short hostname lookups + items: + type: string + type: array + type: object + routes: + description: Routes configures IP routes for the interface + items: + description: StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes + properties: + destination: + description: Destination points the IP route destination + type: string + gateway: + description: Gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI plugin). 
+ type: string + type: object + type: array + type: object + type: + description: Type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic + type: string + type: object + master: + description: master is the host interface to create the macvlan interface from. If not specified, it will be default route interface + type: string + mode: + description: 'mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge' + type: string + mtu: + description: mtu is the mtu to use for the macvlan interface. if unset, host's kernel will select the value. + format: int32 + minimum: 0.0 + type: integer + type: object + type: + description: type is the type of network The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan + type: string + type: object + type: array + clusterNetwork: + description: clusterNetwork is the IP address pool to use for pod IPs. Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. Others only support one. This is equivalent to the cluster-cidr. + items: + description: ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks + properties: + cidr: + type: string + hostPrefix: + format: int32 + minimum: 0.0 + type: integer + type: object + type: array + defaultNetwork: + description: defaultNetwork is the "default" network that all pods will receive + properties: + kuryrConfig: + description: KuryrConfig configures the kuryr plugin + properties: + controllerProbesPort: + description: The port kuryr-controller will listen for readiness and liveness requests. 
+ format: int32 + minimum: 0.0 + type: integer + daemonProbesPort: + description: The port kuryr-daemon will listen for readiness and liveness requests. + format: int32 + minimum: 0.0 + type: integer + enablePortPoolsPrepopulation: + description: enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port pool with a minimum number of ports. Kuryr uses Neutron port pooling to fight the fact that it takes a significant amount of time to create one. It creates a number of ports when the first pod that is configured to use the dedicated network for pods is created in a namespace, and keeps them ready to be attached to pods. Port prepopulation is disabled by default. + type: boolean + mtu: + description: mtu is the MTU that Kuryr should use when creating pod networks in Neutron. The value has to be lower or equal to the MTU of the nodes network and Neutron has to allow creation of tenant networks with such MTU. If unset Pod networks will be created with the same MTU as the nodes network has. This also affects the services network created by cluster-network-operator. + format: int32 + minimum: 0.0 + type: integer + openStackServiceNetwork: + description: openStackServiceNetwork contains the CIDR of network from which to allocate IPs for OpenStack Octavia's Amphora VMs. Please note that with Amphora driver Octavia uses two IPs from that network for each loadbalancer - one given by OpenShift and second for VRRP connections. As the first one is managed by OpenShift's and second by Neutron's IPAMs, those need to come from different pools. Therefore `openStackServiceNetwork` needs to be at least twice the size of `serviceNetwork`, and whole `serviceNetwork` must be overlapping with `openStackServiceNetwork`. cluster-network-operator will then make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that are not overlapping with `serviceNetwork`, effectivly preventing conflicts. 
If not set cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix size by 1. + type: string + poolBatchPorts: + description: poolBatchPorts sets a number of ports that should be created in a single batch request to extend the port pool. The default is 3. For more information about port pools see enablePortPoolsPrepopulation setting. + minimum: 0.0 + type: integer + poolMaxPorts: + description: poolMaxPorts sets a maximum number of free ports that are being kept in a port pool. If the number of ports exceeds this setting, free ports will get deleted. Setting 0 will disable this upper bound, effectively preventing pools from shrinking and this is the default value. For more information about port pools see enablePortPoolsPrepopulation setting. + minimum: 0.0 + type: integer + poolMinPorts: + description: poolMinPorts sets a minimum number of free ports that should be kept in a port pool. If the number of ports is lower than this setting, new ports will get created and added to pool. The default is 1. For more information about port pools see enablePortPoolsPrepopulation setting. + minimum: 1.0 + type: integer + type: object + openshiftSDNConfig: + description: openShiftSDNConfig configures the openshift-sdn plugin + properties: + enableUnidling: + description: enableUnidling controls whether or not the service proxy will support idling and unidling of services. By default, unidling is enabled. + type: boolean + mode: + description: mode is one of "Multitenant", "Subnet", or "NetworkPolicy" + type: string + mtu: + description: mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink. + format: int32 + minimum: 0.0 + type: integer + useExternalOpenvswitch: + description: 'useExternalOpenvswitch used to control whether the operator would deploy an OVS DaemonSet itself or expect someone else to start OVS. 
As of 4.6, OVS is always run as a system service, and this flag is ignored. DEPRECATED: non-functional as of 4.6' + type: boolean + vxlanPort: + description: vxlanPort is the port to use for all vxlan packets. The default is 4789. + format: int32 + minimum: 0.0 + type: integer + type: object + ovnKubernetesConfig: + description: ovnKubernetesConfig configures the ovn-kubernetes plugin. + properties: + egressIPConfig: + description: egressIPConfig holds the configuration for EgressIP options. + properties: + reachabilityTotalTimeoutSeconds: + description: reachabilityTotalTimeout configures the EgressIP node reachability check total timeout in seconds. If the EgressIP node cannot be reached within this timeout, the node is declared down. Setting a large value may cause the EgressIP feature to react slowly to node changes. In particular, it may react slowly for EgressIP nodes that really have a genuine problem and are unreachable. When omitted, this means the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 1 second. A value of 0 disables the EgressIP node's reachability check. + format: int32 + maximum: 60.0 + minimum: 0.0 + type: integer + type: object + gatewayConfig: + description: gatewayConfig holds the configuration for node gateway options. + properties: + ipForwarding: + description: IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to "Global". The supported values are "Restricted" and "Global". + type: string + ipv4: + description: ipv4 allows users to configure IP settings for IPv4 connections. 
When omitted, this means no opinion and the default configuration is used. Check individual members fields within ipv4 for details of default values. + properties: + internalMasqueradeSubnet: + description: internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.169.0/29 The value must be in proper IPV4 CIDR format + maxLength: 18 + type: string + x-kubernetes-validations: + - message: CIDR format must contain exactly one '/' + rule: self.indexOf('/') == self.lastIndexOf('/') + - message: subnet must be in the range /0 to /29 inclusive + rule: '[int(self.split(''/'')[1])].all(x, x <= 29 && x >= 0)' + - message: a valid IPv4 address must contain 4 octets + rule: self.split('/')[0].split('.').size() == 4 + - message: first IP address octet must not contain leading zeros, must be greater than 0 and less or equal to 255 + rule: '[self.findAll(''[0-9]+'')[0]].all(x, x != ''0'' && int(x) <= 255 && !x.startsWith(''0''))' + - message: IP address octets must not contain leading zeros, and must be less or equal to 255 + rule: '[self.findAll(''[0-9]+'')[1], self.findAll(''[0-9]+'')[2], self.findAll(''[0-9]+'')[3]].all(x, int(x) <= 255 && (x == ''0'' || !x.startsWith(''0'')))' + type: object + ipv6: + description: ipv6 allows users to configure IP settings for IPv6 connections. 
When omitted, this means no opinion and the default configuration is used. Check individual members fields within ipv6 for details of default values. + properties: + internalMasqueradeSubnet: + description: internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /125). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is fd69::/125 Note that IPV6 dual addresses are not permitted + type: string + x-kubernetes-validations: + - message: CIDR format must contain exactly one '/' + rule: self.indexOf('/') == self.lastIndexOf('/') + - message: subnet must be in the range /0 to /125 inclusive + rule: self.split('/').size() == 2 && [int(self.split('/')[1])].all(x, x <= 125 && x >= 0) + - message: IPv6 addresses must contain at most one '::' and may only be shortened once + rule: self.indexOf('::') == self.lastIndexOf('::') + - message: a valid IPv6 address must contain 8 segments unless elided (::), in which case it must contain at most 6 non-empty segments + rule: 'self.contains(''::'') ? self.split(''/'')[0].split('':'').size() <= 8 : self.split(''/'')[0].split('':'').size() == 8' + - message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 1 + rule: 'self.split(''/'')[0].split('':'').size() >=1 ? 
[self.split(''/'')[0].split('':'', 8)[0]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + - message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 2 + rule: 'self.split(''/'')[0].split('':'').size() >=2 ? [self.split(''/'')[0].split('':'', 8)[1]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + - message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 3 + rule: 'self.split(''/'')[0].split('':'').size() >=3 ? [self.split(''/'')[0].split('':'', 8)[2]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + - message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 4 + rule: 'self.split(''/'')[0].split('':'').size() >=4 ? [self.split(''/'')[0].split('':'', 8)[3]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + - message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 5 + rule: 'self.split(''/'')[0].split('':'').size() >=5 ? [self.split(''/'')[0].split('':'', 8)[4]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + - message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 6 + rule: 'self.split(''/'')[0].split('':'').size() >=6 ? [self.split(''/'')[0].split('':'', 8)[5]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + - message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 7 + rule: 'self.split(''/'')[0].split('':'').size() >=7 ? 
[self.split(''/'')[0].split('':'', 8)[6]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + - message: each segment of an IPv6 address must be a hexadecimal number between 0 and FFFF, failed on segment 8 + rule: 'self.split(''/'')[0].split('':'').size() >=8 ? [self.split(''/'')[0].split('':'', 8)[7]].all(x, x == '''' || (x.matches(''^[0-9A-Fa-f]{1,4}$'')) && size(x)<5 ) : true' + - message: IPv6 dual addresses are not permitted, value should not contain `.` characters + rule: '!self.contains(''.'')' + type: object + routingViaHost: + default: false + description: RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified. + type: boolean + type: object + genevePort: + description: geneve port is the UDP port to be used by geneve encapulation. Default is 6081 + format: int32 + minimum: 1.0 + type: integer + hybridOverlayConfig: + description: HybridOverlayConfig configures an additional overlay network for peers that are not using OVN. + properties: + hybridClusterNetwork: + description: HybridClusterNetwork defines a network space given to nodes on an additional overlay network. + items: + description: ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks + properties: + cidr: + type: string + hostPrefix: + format: int32 + minimum: 0.0 + type: integer + type: object + type: array + hybridOverlayVXLANPort: + description: HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. 
Default is 4789 + format: int32 + type: integer + type: object + ipsecConfig: + description: ipsecConfig enables and configures IPsec for pods on the pod network within the cluster. + type: object + mtu: + description: mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400 + format: int32 + minimum: 0.0 + type: integer + policyAuditConfig: + description: policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used. + properties: + destination: + default: 'null' + description: 'destination is the location for policy log messages. Regardless of this config, persistent logs will always be dumped to the host at /var/log/ovn/ however Additionally syslog output may be configured as follows. Valid values are: - "libc" -> to use the libc syslog() function of the host node''s journdald process - "udp:host:port" -> for sending syslog over UDP - "unix:file" -> for using the UNIX domain socket directly - "null" -> to discard all messages logged to syslog The default is "null"' + type: string + maxFileSize: + default: 50 + description: maxFilesSize is the max size an ACL_audit log file is allowed to reach before rotation occurs Units are in MB and the Default is 50MB + format: int32 + minimum: 1.0 + type: integer + maxLogFiles: + default: 5 + description: maxLogFiles specifies the maximum number of ACL_audit log files that can be present. + format: int32 + minimum: 1.0 + type: integer + rateLimit: + default: 20 + description: rateLimit is the approximate maximum number of messages to generate per-second per-node. If unset the default of 20 msg/sec is used. + format: int32 + minimum: 1.0 + type: integer + syslogFacility: + default: local0 + description: syslogFacility the RFC5424 facility for generated messages, e.g. "kern". 
Default is "local0" + type: string + type: object + v4InternalSubnet: + description: v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is 100.64.0.0/16 + type: string + v6InternalSubnet: + description: v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is fd98::/48 + type: string + type: object + type: + description: type is the type of network All NetworkTypes are supported except for NetworkTypeRaw + type: string + type: object + deployKubeProxy: + description: deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when OpenShift SDN and ovn-kubernetes are used and true otherwise. + type: boolean + disableMultiNetwork: + description: disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled. + type: boolean + disableNetworkDiagnostics: + default: false + description: disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck CRs from a test pod to every node, apiserver and LB should be disabled or not. If unset, this property defaults to 'false' and network diagnostics is enabled. 
Setting this to 'true' would reduce the additional load of the pods performing the checks. + type: boolean + exportNetworkFlows: + description: exportNetworkFlows enables and configures the export of network flow metadata from the pod network by using protocols NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes plugin. If unset, flows will not be exported to any collector. + properties: + ipfix: + description: ipfix defines IPFIX configuration. + properties: + collectors: + description: ipfixCollectors is list of strings formatted as ip:port with a maximum of ten items + items: + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: string + maxItems: 10 + minItems: 1 + type: array + type: object + netFlow: + description: netFlow defines the NetFlow configuration. + properties: + collectors: + description: netFlow defines the NetFlow collectors that will consume the flow data exported from OVS. It is a list of strings formatted as ip:port with a maximum of ten items + items: + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: string + maxItems: 10 + minItems: 1 + type: array + type: object + sFlow: + description: sFlow defines the SFlow configuration. 
+ properties: + collectors: + description: sFlowCollectors is list of strings formatted as ip:port with a maximum of ten items + items: + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: string + maxItems: 10 + minItems: 1 + type: array + type: object + type: object + kubeProxyConfig: + description: kubeProxyConfig lets us configure desired proxy configuration. If not specified, sensible defaults will be chosen by OpenShift directly. Not consumed by all network providers - currently only openshift-sdn. + properties: + bindAddress: + description: The address to "bind" on Defaults to 0.0.0.0 + type: string + iptablesSyncPeriod: + description: 'An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted in large clusters for performance reasons, but this is no longer necessary, and there is no reason to change this from the default value. Default: 30s' + type: string + proxyArguments: + additionalProperties: + description: ProxyArgumentList is a list of arguments to pass to the kubeproxy process + items: + type: string + type: array + description: Any additional arguments to pass to the kubeproxy process + type: object + type: object + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + migration: + description: migration enables and configures the cluster network migration. The migration procedure allows to change the network type and the MTU. + properties: + features: + description: features contains the features migration configuration. Set this to migrate feature configuration when changing the cluster default network provider. if unset, the default operation is to migrate all the configuration of supported features. + properties: + egressFirewall: + default: true + description: egressFirewall specifies whether or not the Egress Firewall configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and Egress Firewall configure is migrated. + type: boolean + egressIP: + default: true + description: egressIP specifies whether or not the Egress IP configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and Egress IP configure is migrated. + type: boolean + multicast: + default: true + description: multicast specifies whether or not the multicast configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and multicast configure is migrated. + type: boolean + type: object + mtu: + description: mtu contains the MTU migration configuration. Set this to allow changing the MTU values for the default network. If unset, the operation of changing the MTU for the default network will be rejected. + properties: + machine: + description: machine contains MTU migration configuration for the machine's uplink. 
Needs to be migrated along with the default network MTU unless the current uplink MTU already accommodates the default network MTU. + properties: + from: + description: from is the MTU to migrate from. + format: int32 + minimum: 0.0 + type: integer + to: + description: to is the MTU to migrate to. + format: int32 + minimum: 0.0 + type: integer + type: object + network: + description: network contains information about MTU migration for the default network. Migrations are only allowed to MTU values lower than the machine's uplink MTU by the minimum appropriate offset. + properties: + from: + description: from is the MTU to migrate from. + format: int32 + minimum: 0.0 + type: integer + to: + description: to is the MTU to migrate to. + format: int32 + minimum: 0.0 + type: integer + type: object + type: object + networkType: + description: networkType is the target type of network migration. Set this to the target network type to allow changing the default network. If unset, the operation of changing cluster default network plugin will be rejected. The supported values are OpenShiftSDN, OVNKubernetes + type: string + type: object + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + serviceNetwork: + description: serviceNetwork is the ip address pool to use for Service IPs Currently, all existing network providers only support a single value here, but this is an array to allow for growth. + items: + type: string + type: array + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + useMultiNetworkPolicy: + description: useMultiNetworkPolicy enables a controller which allows for MultiNetworkPolicy objects to be used on additional networks as created by Multus CNI. MultiNetworkPolicy are similar to NetworkPolicy objects, but NetworkPolicy objects only apply to the primary interface. With MultiNetworkPolicy, you can control the traffic that a pod can receive over the secondary interfaces. If unset, this property defaults to 'false' and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is 'true' then the value of this field is ignored. + type: boolean + type: object + status: + description: NetworkStatus is detailed operator status, which is distilled up to the Network clusteroperator object. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + type: object + served: true + storage: true diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/openshiftapiservers.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/openshiftapiservers.yaml new file mode 100644 index 000000000..8104aa1be --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/openshiftapiservers.yaml @@ -0,0 +1,141 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: 
CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: openshiftapiservers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: OpenShiftAPIServer + plural: openshiftapiservers + singular: openshiftapiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the OpenShift API Server. + properties: + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". 
Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status defines the observed status of the OpenShift API Server. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods. 
+ format: int32 + minimum: 0.0 + type: integer + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/openshiftcontrollermanagers.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/openshiftcontrollermanagers.yaml new file mode 100644 index 000000000..89a599338 --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/openshiftcontrollermanagers.yaml @@ -0,0 +1,134 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: openshiftcontrollermanagers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: OpenShiftControllerManager + plural: openshiftcontrollermanagers + singular: openshiftcontrollermanager + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/servicecas.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/servicecas.yaml new file mode 100644 index 000000000..1dad6bdea --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/servicecas.yaml @@ -0,0 +1,135 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: servicecas.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: ServiceCA + listKind: ServiceCAList + plural: servicecas + 
singular: serviceca + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ServiceCA provides information to configure an operator to manage the service cert controllers \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. 
It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/storages.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/storages.yaml new file mode 100644 index 000000000..464466732 --- /dev/null +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/storages.yaml @@ -0,0 +1,144 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/670 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: storages.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Storage + plural: storages + singular: storage + scope: Cluster + versions: 
+ - name: v1 + schema: + openAPIV3Schema: + description: "Storage provides a means to configure an operator to manage the cluster storage operator. `cluster` is the canonical name. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. 
It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - '' + - Normal + - Debug + - Trace + - TraceAll + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + vsphereStorageDriver: + description: 'VSphereStorageDriver indicates the storage driver to use on VSphere clusters. Once this field is set to CSIWithMigrationDriver, it can not be changed. If this is empty, the platform will choose a good default, which may change over time without notice. The current default is CSIWithMigrationDriver and may not be changed. DEPRECATED: This field will be removed in a future release.' + enum: + - '' + - LegacyDeprecatedInTreeDriver + - CSIWithMigrationDriver + type: string + x-kubernetes-validations: + - message: VSphereStorageDriver can not be set to LegacyDeprecatedInTreeDriver + rule: self != "LegacyDeprecatedInTreeDriver" + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. 
+ properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/platform.openshift.io/v1alpha1/platformoperators.args 
b/crd-catalog/openshift/api/platform.openshift.io/v1alpha1/platformoperators.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/platform.openshift.io/v1alpha1/platformoperators.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/platform.openshift.io/v1alpha1/platformoperators.yaml b/crd-catalog/openshift/api/platform.openshift.io/v1alpha1/platformoperators.yaml new file mode 100644 index 000000000..7dbe95387 --- /dev/null +++ b/crd-catalog/openshift/api/platform.openshift.io/v1alpha1/platformoperators.yaml @@ -0,0 +1,116 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1234 + exclude.release.openshift.io/internal-openshift-hosted: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: platformoperators.platform.openshift.io +spec: + group: platform.openshift.io + names: + kind: PlatformOperator + listKind: PlatformOperatorList + plural: platformoperators + singular: platformoperator + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "PlatformOperator is the Schema for the PlatformOperators API. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PlatformOperatorSpec defines the desired state of PlatformOperator. + properties: + package: + description: package contains the desired package and its configuration for this PlatformOperator. + properties: + name: + description: "name contains the desired OLM-based Operator package name that is defined in an existing CatalogSource resource in the cluster. \n This configured package will be managed with the cluster's lifecycle. In the current implementation, it will be retrieving this name from a list of supported operators out of the catalogs included with OpenShift. \n ---" + maxLength: 56 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + required: + - name + type: object + required: + - package + type: object + status: + description: PlatformOperatorStatus defines the observed state of PlatformOperator + properties: + activeBundleDeployment: + description: activeBundleDeployment is the reference to the BundleDeployment resource that's being managed by this PO resource. If this field is not populated in the status then it means the PlatformOperator has either not been installed yet or is failing to install. + properties: + name: + description: name is the metadata.name of the referenced BundleDeployment object. + type: string + required: + - name + type: object + conditions: + description: conditions represent the latest available observations of a platform operator's current state. 
+ items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/quota.openshift.io/v1/clusterresourcequotas.yaml b/crd-catalog/openshift/api/quota.openshift.io/v1/clusterresourcequotas.yaml new file mode 100644 index 000000000..71a500565 --- /dev/null +++ b/crd-catalog/openshift/api/quota.openshift.io/v1/clusterresourcequotas.yaml @@ -0,0 +1,197 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: clusterresourcequotas.quota.openshift.io +spec: + group: quota.openshift.io + names: + kind: ClusterResourceQuota + listKind: ClusterResourceQuotaList + plural: clusterresourcequotas + singular: clusterresourcequota + scope: Cluster + versions: + - name: v1 + schema: + 
openAPIV3Schema: + description: "ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to synthetic ResourceQuota object to allow quota evaluation re-use. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired quota + properties: + quota: + description: Quota defines the desired quota + properties: + hard: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + scopeSelector: + description: scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched. + properties: + matchExpressions: + description: A list of scope selector requirements by scope of the resources. 
+ items: + description: A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values. + properties: + operator: + description: Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. + type: string + scopeName: + description: The name of the scope that the selector applies to. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - operator + - scopeName + type: object + type: array + type: object + x-kubernetes-map-type: atomic + scopes: + description: A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects. + items: + description: A ResourceQuotaScope defines a filter that must match each object tracked by a quota + type: string + type: array + type: object + selector: + description: Selector is the selector used to match projects. It should only select active projects on the scale of dozens (though it can select many more less active projects). These projects will contend on object creation through this resource. + properties: + annotations: + additionalProperties: + type: string + description: AnnotationSelector is used to select projects by annotation. + nullable: true + type: object + labels: + description: LabelSelector is used to select projects by label. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + required: + - quota + - selector + type: object + status: + description: Status defines the actual enforced quota and its current usage + properties: + namespaces: + description: Namespaces slices the usage by project. This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects. This can be used to pull the deltas for a given project. 
+ items: + description: ResourceQuotaStatusByNamespace gives status for a particular project + properties: + namespace: + description: Namespace the project this status applies to + type: string + status: + description: Status indicates how many resources have been consumed by this project + properties: + hard: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + used: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Used is the current observed total usage of the resource in the namespace. + type: object + type: object + required: + - namespace + - status + type: object + nullable: true + type: array + total: + description: Total defines the actual enforced quota and its current usage across all projects + properties: + hard: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + used: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Used is the current observed total usage of the resource in the namespace. 
+ type: object + type: object + required: + - total + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/route.openshift.io/v1/routes.args b/crd-catalog/openshift/api/route.openshift.io/v1/routes.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/route.openshift.io/v1/routes.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/route.openshift.io/v1/routes.yaml b/crd-catalog/openshift/api/route.openshift.io/v1/routes.yaml new file mode 100644 index 000000000..91c061799 --- /dev/null +++ b/crd-catalog/openshift/api/route.openshift.io/v1/routes.yaml @@ -0,0 +1,407 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1228 + name: routes.route.openshift.io +spec: + group: route.openshift.io + names: + kind: Route + plural: routes + singular: route + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.ingress[0].host + name: Host + type: string + - jsonPath: .status.ingress[0].conditions[?(@.type=="Admitted")].status + name: Admitted + type: string + - jsonPath: .spec.to.name + name: Service + type: string + - jsonPath: .spec.tls.type + name: TLS + type: string + name: v1 + schema: + openAPIV3Schema: + description: "A route allows developers to expose services through an HTTP(S) aware load balancing and proxy layer via a public DNS entry. The route may further specify TLS options and a certificate, or specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An administrator typically configures their router to be visible outside the cluster firewall, and may also add additional security, caching, or traffic controls on the service content. Routers usually talk directly to the service endpoints. 
\n Once a route is created, the `host` field may not be changed. Generally, routers use the oldest route with a given host when resolving conflicts. \n Routers are subject to additional customization and may support additional controls via the annotations field. \n Because administrators may configure multiple routers, the route status field is used to return information to clients about the names and states of the route under each router. If a client chooses a duplicate name, for instance, the route status conditions are used to indicate the route cannot be chosen. \n To enable HTTP/2 ALPN on a route it requires a custom (non-wildcard) certificate. This prevents connection coalescing by clients, notably web browsers. We do not support HTTP/2 ALPN on routes that use the default certificate because of the risk of connection re-use/coalescing. Routes that do not have their own custom certificate will not be HTTP/2 ALPN-enabled on either the frontend or the backend. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + allOf: + - anyOf: + - properties: + path: + maxLength: 0 + - properties: + tls: + enum: + - null + - not: + properties: + tls: + properties: + termination: + enum: + - passthrough + - anyOf: + - not: + properties: + host: + maxLength: 0 + - not: + properties: + wildcardPolicy: + enum: + - Subdomain + description: spec is the desired state of the route + properties: + alternateBackends: + description: alternateBackends allows up to 3 additional backends to be assigned to the route. Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference. + items: + description: RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others. + properties: + kind: + default: Service + description: The kind of target that the route is referring to. Currently, only 'Service' is allowed + enum: + - Service + - '' + type: string + name: + description: name of the service/target that is being referred to. e.g. name of the service + minLength: 1 + type: string + weight: + default: 100 + description: weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. + format: int32 + maximum: 256.0 + minimum: 0.0 + type: integer + required: + - kind + - name + type: object + maxItems: 3 + type: array + host: + description: host is an alias/DNS that points to the service. Optional. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions. 
+ maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + httpHeaders: + description: httpHeaders defines policy for HTTP headers. + properties: + actions: + description: 'actions specifies options for modifying headers and their values. Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController''s spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route''s spec.httpHeaders.actions field. The headers set via this API will not appear in access logs. Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details.' 
+ properties: + request: + description: 'request is a list of HTTP request headers to modify. Currently, actions may define to either `Set` or `Delete` headers values. Actions defined here will modify the request headers of all requests made through a route. These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. Currently, actions may define to either `Set` or `Delete` headers values. Route actions will be executed after IngressController actions for request headers. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. You can use this field to specify HTTP request headers that should be set or deleted when forwarding connections from the client to your application. Sample fetchers allowed are "req.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". Any request header configuration applied directly via a Route resource using this API will override header configuration for a header of the same name applied via spec.httpHeaders.actions on the IngressController or route annotation. Note: This field cannot be used if your route uses TLS passthrough.' + items: + description: RouteHTTPHeader specifies configuration for setting or deleting an HTTP header. + properties: + action: + description: action specifies actions to perform on headers, such as setting or deleting headers. + properties: + set: + description: 'set defines the HTTP header that should be set: added if it doesn''t exist or replaced if it does. This field is required when type is Set and forbidden otherwise.' + properties: + value: + description: value specifies a header value. Dynamic values can be added. 
The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + maxLength: 16384 + minLength: 1 + type: string + required: + - value + type: object + type: + description: type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + enum: + - Set + - Delete + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: set is required when type is Set, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Set'' ? has(self.set) : !has(self.set)' + name: + description: 'name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&''*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.' 
+ maxLength: 255 + minLength: 1 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + x-kubernetes-validations: + - message: strict-transport-security header may not be modified via header actions + rule: self.lowerAscii() != 'strict-transport-security' + - message: proxy header may not be modified via header actions + rule: self.lowerAscii() != 'proxy' + - message: cookie header may not be modified via header actions + rule: self.lowerAscii() != 'cookie' + - message: set-cookie header may not be modified via header actions + rule: self.lowerAscii() != 'set-cookie' + required: + - action + - name + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64. + rule: self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$'))) + response: + description: 'response is a list of HTTP response headers to modify. Currently, actions may define to either `Set` or `Delete` headers values. Actions defined here will modify the response headers of all requests made through a route. These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. Route actions will be executed before IngressController actions for response headers. 
Actions are applied in sequence as defined in this list. A maximum of 20 response header actions may be configured. You can use this field to specify HTTP response headers that should be set or deleted when forwarding responses from your application to the client. Sample fetchers allowed are "res.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". Note: This field cannot be used if your route uses TLS passthrough.' + items: + description: RouteHTTPHeader specifies configuration for setting or deleting an HTTP header. + properties: + action: + description: action specifies actions to perform on headers, such as setting or deleting headers. + properties: + set: + description: 'set defines the HTTP header that should be set: added if it doesn''t exist or replaced if it does. This field is required when type is Set and forbidden otherwise.' + properties: + value: + description: value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + maxLength: 16384 + minLength: 1 + type: string + required: + - value + type: object + type: + description: type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. 
+ enum: + - Set + - Delete + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: set is required when type is Set, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Set'' ? has(self.set) : !has(self.set)' + name: + description: 'name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&''*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.' + maxLength: 255 + minLength: 1 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + x-kubernetes-validations: + - message: strict-transport-security header may not be modified via header actions + rule: self.lowerAscii() != 'strict-transport-security' + - message: proxy header may not be modified via header actions + rule: self.lowerAscii() != 'proxy' + - message: cookie header may not be modified via header actions + rule: self.lowerAscii() != 'cookie' + - message: set-cookie header may not be modified via header actions + rule: self.lowerAscii() != 'set-cookie' + required: + - action + - name + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. 
Converters allowed are lower, base64. + rule: self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$'))) + type: object + type: object + path: + description: path that the router watches for, to route traffic for to the service. Optional + pattern: ^/ + type: string + port: + description: If specified, the port to be used by the router. Most routers will use all endpoints exposed by the service by default - set this value to instruct routers which port to use. + properties: + targetPort: + allOf: + - not: + enum: + - 0 + - not: + enum: + - '' + x-kubernetes-int-or-string: true + required: + - targetPort + type: object + subdomain: + description: "subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). If host is set this field is ignored. An ingress controller may choose to ignore this suggested name, in which case the controller will report the assigned name in the status.ingress array or refuse to admit the route. If this value is set and the server does not support this field host will be populated automatically. Otherwise host is left empty. The field may have multiple parts separated by a dot, but not all ingress controllers may honor the request. This field may not be changed after creation except by a user with the update routes/custom-host permission. \n Example: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`." 
+ maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + tls: + allOf: + - anyOf: + - properties: + caCertificate: + maxLength: 0 + certificate: + maxLength: 0 + destinationCACertificate: + maxLength: 0 + key: + maxLength: 0 + - not: + properties: + termination: + enum: + - passthrough + - anyOf: + - properties: + destinationCACertificate: + maxLength: 0 + - not: + properties: + termination: + enum: + - edge + description: The tls field provides the ability to configure certificates and termination for the route. + properties: + caCertificate: + description: caCertificate provides the cert authority certificate contents + type: string + certificate: + description: certificate provides certificate contents. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. + type: string + destinationCACertificate: + description: destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify. + type: string + insecureEdgeTerminationPolicy: + description: "insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While each router may make its own decisions on which ports to expose, this is normally port 80. \n * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only) (default). * None - no traffic is allowed on the insecure port. * Redirect - clients are redirected to the secure port." 
+ enum: + - Allow + - None + - Redirect + - '' + type: string + key: + description: key provides key file contents + type: string + termination: + description: "termination indicates termination type. \n * edge - TLS termination is done by the router and http is used to communicate with the backend (default) * passthrough - Traffic is sent straight to the destination without the router providing TLS termination * reencrypt - TLS termination is done by the router and https is used to communicate with the backend \n Note: passthrough termination is incompatible with httpHeader actions" + enum: + - edge + - reencrypt + - passthrough + type: string + required: + - termination + type: object + x-kubernetes-validations: + - message: 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' + rule: 'has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination==''passthrough'') && (self.insecureEdgeTerminationPolicy==''Allow'')) : true' + to: + description: to is an object the route should use as the primary backend. Only the Service kind is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) is set to zero, no traffic will be sent to this backend. + properties: + kind: + default: Service + description: The kind of target that the route is referring to. Currently, only 'Service' is allowed + enum: + - Service + - '' + type: string + name: + description: name of the service/target that is being referred to. e.g. name of the service + minLength: 1 + type: string + weight: + default: 100 + description: weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. + format: int32 + maximum: 256.0 + minimum: 0.0 + type: integer + required: + - kind + - name + type: object + wildcardPolicy: + default: None + description: Wildcard policy if any for the route. 
Currently only 'Subdomain' or 'None' is allowed. + enum: + - None + - Subdomain + - '' + type: string + required: + - to + type: object + x-kubernetes-validations: + - message: header actions are not permitted when tls termination is passthrough. + rule: '!has(self.tls) || self.tls.termination != ''passthrough'' || !has(self.httpHeaders)' + status: + description: status is the current state of the route + properties: + ingress: + description: ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. Routes are considered live once they are `Ready` + items: + description: RouteIngress holds information about the places where a route is exposed. + properties: + conditions: + description: Conditions is the state of the route, may be empty. + items: + description: RouteIngressCondition contains details for the current condition of this route on a particular router. + properties: + lastTransitionTime: + description: RFC 3339 date and time when this condition last transitioned + format: date-time + type: string + message: + description: Human readable message indicating details about last transition. + type: string + reason: + description: (brief) reason for the condition's last transition, and is usually a machine and human readable constant + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. Currently only Admitted. + type: string + required: + - status + - type + type: object + type: array + host: + description: Host is the host string under which the route is exposed; this value is required + type: string + routerCanonicalHostname: + description: CanonicalHostname is the external host name for the router that can be used as a CNAME for the host requested for this route. This value is optional and may not be set in all cases. 
+ type: string + routerName: + description: Name is a name chosen by the router to identify itself; this value is required + type: string + wildcardPolicy: + description: Wildcard policy is the wildcard policy that was allowed where this route is exposed. + type: string + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/samples.operator.openshift.io/v1/configs.yaml b/crd-catalog/openshift/api/samples.operator.openshift.io/v1/configs.yaml new file mode 100644 index 000000000..caabc8875 --- /dev/null +++ b/crd-catalog/openshift/api/samples.operator.openshift.io/v1/configs.yaml @@ -0,0 +1,127 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/513 + description: Extension for configuring openshif samples operator. + displayName: ConfigsSamples + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: configs.samples.operator.openshift.io +spec: + group: samples.operator.openshift.io + names: + kind: Config + listKind: ConfigList + plural: configs + singular: config + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Config contains the configuration and detailed condition status for the Samples Operator. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConfigSpec contains the desired configuration and state for the Samples Operator, controlling various behavior around the imagestreams and templates it creates/updates in the openshift namespace. + properties: + architectures: + description: architectures determine which hardware architecture(s) to install, where x86_64, ppc64le, and s390x are the only supported choices currently. + items: + type: string + type: array + managementState: + description: managementState is top level on/off type of switch for all operators. When "Managed", this operator processes config and manipulates the samples accordingly. When "Unmanaged", this operator ignores any updates to the resources it watches. When "Removed", it reacts that same wasy as it does if the Config object is deleted, meaning any ImageStreams or Templates it manages (i.e. it honors the skipped lists) and the registry secret are deleted, along with the ConfigMap in the operator's namespace that represents the last config used to manipulate the samples, + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + samplesRegistry: + description: samplesRegistry allows for the specification of which registry is accessed by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library that are pulled into this github repository, but based on our pulling only ocp content it typically defaults to registry.redhat.io. 
+ type: string + skippedImagestreams: + description: skippedImagestreams specifies names of image streams that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here. + items: + type: string + type: array + skippedTemplates: + description: skippedTemplates specifies names of templates that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here. + items: + type: string + type: array + type: object + status: + description: ConfigStatus contains the actual configuration in effect, as well as various details that describe the state of the Samples Operator. + properties: + architectures: + description: architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the supported choices. + items: + type: string + type: array + conditions: + description: conditions represents the available maintenance status of the sample imagestreams and templates. + items: + description: ConfigCondition captures various conditions of the Config as entries are processed. + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. + format: date-time + type: string + lastUpdateTime: + description: lastUpdateTime is the last time this condition was updated. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. + type: string + reason: + description: reason is what caused the condition's last transition. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type of condition. 
+ type: string + required: + - status + - type + type: object + type: array + managementState: + description: managementState reflects the current operational status of the on/off switch for the operator. This operator compares the ManagementState as part of determining that we are turning the operator back on (i.e. "Managed") when it was previously "Unmanaged". + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + samplesRegistry: + description: samplesRegistry allows for the specification of which registry is accessed by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library that are pulled into this github repository, but based on our pulling only ocp content it typically defaults to registry.redhat.io. + type: string + skippedImagestreams: + description: skippedImagestreams specifies names of image streams that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here. + items: + type: string + type: array + skippedTemplates: + description: skippedTemplates specifies names of templates that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here. 
+ items: + type: string + type: array + version: + description: version is the value of the operator's payload based version indicator when it was last successfully processed + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/security.internal.openshift.io/v1/rangeallocations.yaml b/crd-catalog/openshift/api/security.internal.openshift.io/v1/rangeallocations.yaml new file mode 100644 index 000000000..cde063d1c --- /dev/null +++ b/crd-catalog/openshift/api/security.internal.openshift.io/v1/rangeallocations.yaml @@ -0,0 +1,40 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/751 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: rangeallocations.security.internal.openshift.io +spec: + group: security.internal.openshift.io + names: + kind: RangeAllocation + listKind: RangeAllocationList + plural: rangeallocations + singular: rangeallocation + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "RangeAllocation is used so we can easily expose a RangeAllocation typed for security group This is an internal API, not intended for external consumption. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + data: + description: data is a byte array representing the serialized state of a range allocation. It is a bitmap with each bit set to one to represent a range is taken. + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + range: + description: range is a string representing a unique label for a range of uids, "1000000000-2000000000/10000". + type: string + type: object + served: true + storage: true diff --git a/crd-catalog/openshift/api/security.openshift.io/v1/securitycontextconstraints.yaml b/crd-catalog/openshift/api/security.openshift.io/v1/securitycontextconstraints.yaml new file mode 100644 index 000000000..b456cf1a3 --- /dev/null +++ b/crd-catalog/openshift/api/security.openshift.io/v1/securitycontextconstraints.yaml @@ -0,0 +1,279 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: 'true' + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + name: securitycontextconstraints.security.openshift.io +spec: + group: security.openshift.io + names: + kind: SecurityContextConstraints + listKind: SecurityContextConstraintsList + plural: securitycontextconstraints + singular: securitycontextconstraints + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Determines if a container can request to be run as privileged + jsonPath: .allowPrivilegedContainer + name: Priv 
+ type: string + - description: A list of capabilities that can be requested to add to the container + jsonPath: .allowedCapabilities + name: Caps + type: string + - description: Strategy that will dictate what labels will be set in the SecurityContext + jsonPath: .seLinuxContext.type + name: SELinux + type: string + - description: Strategy that will dictate what RunAsUser is used in the SecurityContext + jsonPath: .runAsUser.type + name: RunAsUser + type: string + - description: Strategy that will dictate what fs group is used by the SecurityContext + jsonPath: .fsGroup.type + name: FSGroup + type: string + - description: Strategy that will dictate what supplemental groups are used by the SecurityContext + jsonPath: .supplementalGroups.type + name: SupGroup + type: string + - description: Sort order of SCCs + jsonPath: .priority + name: Priority + type: string + - description: Force containers to run with a read only root file system + jsonPath: .readOnlyRootFilesystem + name: ReadOnlyRootFS + type: string + - description: White list of allowed volume plugins + jsonPath: .volumes + name: Volumes + type: string + name: v1 + schema: + openAPIV3Schema: + description: "SecurityContextConstraints governs the ability to make requests that affect the SecurityContext that will be applied to a container. For historical reasons SCC was exposed under the core Kubernetes API group. That exposure is deprecated and will be removed in a future release - users should instead use the security.openshift.io group to manage SecurityContextConstraints. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + allowHostDirVolumePlugin: + description: AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin + type: boolean + allowHostIPC: + description: AllowHostIPC determines if the policy allows host ipc in the containers. 
+ type: boolean + allowHostNetwork: + description: AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + type: boolean + allowHostPID: + description: AllowHostPID determines if the policy allows host pid in the containers. + type: boolean + allowHostPorts: + description: AllowHostPorts determines if the policy allows host ports in the containers. + type: boolean + allowPrivilegeEscalation: + description: AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true. + nullable: true + type: boolean + allowPrivilegedContainer: + description: AllowPrivilegedContainer determines if a container can request to be run as privileged. + type: boolean + allowedCapabilities: + description: AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field maybe added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'. + items: + description: Capability represent POSIX capabilities type + type: string + nullable: true + type: array + allowedFlexVolumes: + description: AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the "Volumes" field. + items: + description: AllowedFlexVolume represents a single Flexvolume that is allowed to be used. + properties: + driver: + description: Driver is the name of the Flexvolume driver. + type: string + required: + - driver + type: object + nullable: true + type: array + allowedUnsafeSysctls: + description: "AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. 
Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. \n Examples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc." + items: + type: string + nullable: true + type: array + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + defaultAddCapabilities: + description: DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities. + items: + description: Capability represent POSIX capabilities type + type: string + nullable: true + type: array + defaultAllowPrivilegeEscalation: + description: DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process. + nullable: true + type: boolean + forbiddenSysctls: + description: "ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. \n Examples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc." + items: + type: string + nullable: true + type: array + fsGroup: + description: FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + nullable: true + properties: + ranges: + description: Ranges are the allowed ranges of fs groups. 
If you would like to force a single fs group then supply a single range with the same start and end. + items: + description: 'IDRange provides a min/max of an allowed range of IDs. TODO: this could be reused for UIDs.' + properties: + max: + description: Max is the end of the range, inclusive. + format: int64 + type: integer + min: + description: Min is the start of the range, inclusive. + format: int64 + type: integer + type: object + type: array + type: + description: Type is the strategy that will dictate what FSGroup is used in the SecurityContext. + type: string + type: object + groups: + description: The groups that have permission to use this security context constraints + items: + type: string + nullable: true + type: array + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + priority: + description: Priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name. + format: int32 + nullable: true + type: integer + readOnlyRootFilesystem: + description: ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to. 
+ type: boolean + requiredDropCapabilities: + description: RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added. + items: + description: Capability represent POSIX capabilities type + type: string + nullable: true + type: array + runAsUser: + description: RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. + nullable: true + properties: + type: + description: Type is the strategy that will dictate what RunAsUser is used in the SecurityContext. + type: string + uid: + description: UID is the user id that containers must run as. Required for the MustRunAs strategy if not using namespace/service account allocated uids. + format: int64 + type: integer + uidRangeMax: + description: UIDRangeMax defines the max value for a strategy that allocates by range. + format: int64 + type: integer + uidRangeMin: + description: UIDRangeMin defines the min value for a strategy that allocates by range. + format: int64 + type: integer + type: object + seLinuxContext: + description: SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. + nullable: true + properties: + seLinuxOptions: + description: seLinuxOptions required to run as; required for MustRunAs + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + type: + description: Type is the strategy that will dictate what SELinux context is used in the SecurityContext. 
+ type: string + type: object + seccompProfiles: + description: "SeccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specifid by the pod or container.\tThe wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default." + items: + type: string + nullable: true + type: array + supplementalGroups: + description: SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + nullable: true + properties: + ranges: + description: Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. + items: + description: 'IDRange provides a min/max of an allowed range of IDs. TODO: this could be reused for UIDs.' + properties: + max: + description: Max is the end of the range, inclusive. + format: int64 + type: integer + min: + description: Min is the start of the range, inclusive. + format: int64 + type: integer + type: object + type: array + type: + description: Type is the strategy that will dictate what supplemental groups is used in the SecurityContext. + type: string + type: object + users: + description: The users who have permissions to use this security context constraints + items: + type: string + nullable: true + type: array + volumes: + description: Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". To allow no volumes, set to ["none"]. + items: + description: FS Type gives strong typing to different file systems that are used by volumes. 
+ type: string + nullable: true + type: array + required: + - allowHostDirVolumePlugin + - allowHostIPC + - allowHostNetwork + - allowHostPID + - allowHostPorts + - allowPrivilegedContainer + - allowedCapabilities + - defaultAddCapabilities + - priority + - readOnlyRootFilesystem + - requiredDropCapabilities + - volumes + type: object + served: true + storage: true diff --git a/crd-catalog/openshift/api/sharedresource.openshift.io/v1alpha1/sharedconfigmaps.args b/crd-catalog/openshift/api/sharedresource.openshift.io/v1alpha1/sharedconfigmaps.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/sharedresource.openshift.io/v1alpha1/sharedconfigmaps.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/sharedresource.openshift.io/v1alpha1/sharedconfigmaps.yaml b/crd-catalog/openshift/api/sharedresource.openshift.io/v1alpha1/sharedconfigmaps.yaml new file mode 100644 index 000000000..cc68e8c60 --- /dev/null +++ b/crd-catalog/openshift/api/sharedresource.openshift.io/v1alpha1/sharedconfigmaps.yaml @@ -0,0 +1,105 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/979 + description: Extension for sharing ConfigMaps across Namespaces + displayName: SharedConfigMap + name: sharedconfigmaps.sharedresource.openshift.io +spec: + group: sharedresource.openshift.io + names: + kind: SharedConfigMap + listKind: SharedConfigMapList + plural: sharedconfigmaps + singular: sharedconfigmap + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "SharedConfigMap allows a ConfigMap to be shared across namespaces. 
Pods can mount the shared ConfigMap by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedConfigMap in the volume attributes: \n spec: volumes: - name: shared-configmap csi: driver: csi.sharedresource.openshift.io volumeAttributes: sharedConfigMap: my-share \n For the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedConfigMap object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects. \n `oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share` `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default` \n Shared resource objects, in this case ConfigMaps, have default permissions of list, get, and watch for system authenticated users. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired shared configmap + properties: + configMapRef: + description: configMapRef is a reference to the ConfigMap to share + properties: + name: + description: name represents the name of the ConfigMap that is being referenced. + type: string + namespace: + description: namespace represents the namespace where the referenced ConfigMap is located. + type: string + required: + - name + - namespace + type: object + description: + description: description is a user readable explanation of what the backing resource provides. + type: string + required: + - configMapRef + type: object + status: + description: status is the observed status of the shared configmap + properties: + conditions: + description: conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller. + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crd-catalog/openshift/api/sharedresource.openshift.io/v1alpha1/sharedsecrets.args b/crd-catalog/openshift/api/sharedresource.openshift.io/v1alpha1/sharedsecrets.args new file mode 100644 index 000000000..7ab47510b --- /dev/null +++ b/crd-catalog/openshift/api/sharedresource.openshift.io/v1alpha1/sharedsecrets.args @@ -0,0 +1 @@ +--derive=PartialEq diff --git a/crd-catalog/openshift/api/sharedresource.openshift.io/v1alpha1/sharedsecrets.yaml b/crd-catalog/openshift/api/sharedresource.openshift.io/v1alpha1/sharedsecrets.yaml new file mode 100644 index 000000000..7f5ddb3fc --- /dev/null +++ b/crd-catalog/openshift/api/sharedresource.openshift.io/v1alpha1/sharedsecrets.yaml @@ -0,0 +1,105 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/979 + description: Extension for sharing Secrets across Namespaces + displayName: SharedSecret + name: sharedsecrets.sharedresource.openshift.io +spec: + group: sharedresource.openshift.io + names: + kind: SharedSecret + listKind: SharedSecretList + plural: sharedsecrets + singular: sharedsecret + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "SharedSecret allows a Secret to be shared across namespaces. 
Pods can mount the shared Secret by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedSecret in the volume attributes: \n spec: volumes: - name: shared-secret csi: driver: csi.sharedresource.openshift.io volumeAttributes: sharedSecret: my-share \n For the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedSecret object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects. \n `oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share` `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default` \n Shared resource objects, in this case Secrets, have default permissions of list, get, and watch for system authenticated users. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired shared secret + properties: + description: + description: description is a user readable explanation of what the backing resource provides. + type: string + secretRef: + description: secretRef is a reference to the Secret to share + properties: + name: + description: name represents the name of the Secret that is being referenced. + type: string + namespace: + description: namespace represents the namespace where the referenced Secret is located. + type: string + required: + - name + - namespace + type: object + required: + - secretRef + type: object + status: + description: status is the observed status of the shared secret + properties: + conditions: + description: conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller. + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0.0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/kube-custom-resources-rs/Cargo.toml b/kube-custom-resources-rs/Cargo.toml index 45443338d..e3b3f26fb 100644 --- a/kube-custom-resources-rs/Cargo.toml +++ b/kube-custom-resources-rs/Cargo.toml @@ -55,6 +55,7 @@ argoproj_io = [] asdb_aerospike_com = [] atlasmap_io = [] auth_ops42_org = [] +authorization_openshift_io = [] authzed_com = [] autoscaling_k8s_io = [] autoscaling_karmada_io = [] @@ -77,6 +78,7 @@ chaos_mesh_org = [] chaosblade_io = [] che_eclipse_org = [] cilium_io = [] +cloud_network_openshift_io = [] cloudformation_linki_space = [] cluster_clusterpedia_io = [] cluster_ipfs_io = [] @@ -86,6 +88,9 @@ config_gatekeeper_sh = [] config_grafana_com = [] config_karmada_io = [] config_koordinator_sh = [] +config_openshift_io = [] +console_openshift_io = [] +controlplane_operator_openshift_io = [] core_linuxsuren_github_com = [] core_openfeature_dev = [] couchbase_com = [] @@ -105,6 +110,7 @@ elasticsearch_k8s_elastic_co = [] elbv2_k8s_aws = [] emrcontainers_services_k8s_aws = [] enterprisesearch_k8s_elastic_co = [] +example_openshift_io = [] execution_furiko_io = [] executor_testkube_io = [] expansion_gatekeeper_sh = [] @@ -124,6 +130,7 @@ getambassador_io = [] gitops_hybrid_cloud_patterns_io = [] grafana_integreatly_org = [] hazelcast_com = [] +helm_openshift_io = [] helm_toolkit_fluxcd_io = [] hive_openshift_io = [] hiveinternal_openshift_io = [] @@ -132,10 +139,13 @@ hyperfoil_io = [] iam_services_k8s_aws = [] ibmcloud_ibm_com = [] image_toolkit_fluxcd_io = [] +imageregistry_operator_openshift_io = [] imaging_ingestion_alvearie_org = [] 
inference_kubedl_io = [] infinispan_org = [] infrastructure_cluster_x_k8s_io = [] +ingress_operator_openshift_io = [] +insights_openshift_io = [] installation_mattermost_com = [] integration_rock8s_com = [] iot_eclipse_org = [] @@ -167,6 +177,7 @@ logging_banzaicloud_io = [] logging_extensions_banzaicloud_io = [] loki_grafana_com = [] longhorn_io = [] +machine_openshift_io = [] machineconfiguration_openshift_io = [] maps_k8s_elastic_co = [] mariadb_mmontes_io = [] @@ -177,12 +188,15 @@ minio_min_io = [] mirrors_kts_studio = [] model_kubedl_io = [] monitoring_coreos_com = [] +monitoring_openshift_io = [] monocle_monocle_change_metrics_io = [] mq_services_k8s_aws = [] multicluster_crd_antrea_io = [] multicluster_x_k8s_io = [] mutations_gatekeeper_sh = [] nativestor_alauda_io = [] +network_openshift_io = [] +network_operator_openshift_io = [] networking_karmada_io = [] nfd_k8s_sigs_io = [] nfd_kubernetes_io = [] @@ -197,17 +211,20 @@ operator_authorino_kuadrant_io = [] operator_cluster_x_k8s_io = [] operator_cryostat_io = [] operator_open_cluster_management_io = [] +operator_openshift_io = [] operator_shipwright_io = [] operator_tigera_io = [] operator_victoriametrics_com = [] org_eclipse_che = [] pkg_crossplane_io = [] +platform_openshift_io = [] policy_clusterpedia_io = [] policy_karmada_io = [] postgres_operator_crunchydata_com = [] postgresql_cnpg_io = [] prometheusservice_services_k8s_aws = [] quay_redhat_com = [] +quota_openshift_io = [] ray_io = [] rds_services_k8s_aws = [] registry_apicur_io = [] @@ -216,10 +233,12 @@ reliablesyncs_kubeedge_io = [] repo_manager_pulpproject_org = [] resources_teleport_dev = [] rocketmq_apache_org = [] +route_openshift_io = [] rules_kubeedge_io = [] runtime_cluster_x_k8s_io = [] s3_services_k8s_aws = [] sagemaker_services_k8s_aws = [] +samples_operator_openshift_io = [] scheduling_koordinator_sh = [] scheduling_sigs_k8s_io = [] scheduling_volcano_sh = [] @@ -229,11 +248,14 @@ secretgenerator_mittwald_de = [] 
secrets_crossplane_io = [] secrets_hashicorp_com = [] secscan_quay_redhat_com = [] +security_internal_openshift_io = [] +security_openshift_io = [] security_profiles_operator_x_k8s_io = [] servicebinding_io = [] services_k8s_aws = [] serving_kubedl_io = [] sfn_services_k8s_aws = [] +sharedresource_openshift_io = [] site_superedge_io = [] slo_koordinator_sh = [] sloth_slok_dev = [] diff --git a/kube-custom-resources-rs/src/authorization_openshift_io/mod.rs b/kube-custom-resources-rs/src/authorization_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/authorization_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/authorization_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/authorization_openshift_io/v1/mod.rs new file mode 100644 index 000000000..28d106172 --- /dev/null +++ b/kube-custom-resources-rs/src/authorization_openshift_io/v1/mod.rs @@ -0,0 +1 @@ +pub mod rolebindingrestrictions; diff --git a/kube-custom-resources-rs/src/authorization_openshift_io/v1/rolebindingrestrictions.rs b/kube-custom-resources-rs/src/authorization_openshift_io/v1/rolebindingrestrictions.rs new file mode 100644 index 000000000..7ce9bb077 --- /dev/null +++ b/kube-custom-resources-rs/src/authorization_openshift_io/v1/rolebindingrestrictions.rs @@ -0,0 +1,118 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/authorization.openshift.io/v1/rolebindingrestrictions.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// Spec defines the matcher. 
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "authorization.openshift.io", version = "v1", kind = "RoleBindingRestriction", plural = "rolebindingrestrictions")] +#[kube(namespaced)] +#[kube(schema = "disabled")] +pub struct RoleBindingRestrictionSpec { + /// GroupRestriction matches against group subjects. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub grouprestriction: Option, + /// ServiceAccountRestriction matches against service-account subjects. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub serviceaccountrestriction: Option, + /// UserRestriction matches against user subjects. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub userrestriction: Option, +} + +/// GroupRestriction matches against group subjects. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct RoleBindingRestrictionGrouprestriction { + /// Groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub groups: Option>, + /// Selectors specifies a list of label selectors over group labels. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, +} + +/// A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct RoleBindingRestrictionGrouprestrictionLabels { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct RoleBindingRestrictionGrouprestrictionLabelsMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// ServiceAccountRestriction matches against service-account subjects. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct RoleBindingRestrictionServiceaccountrestriction { + /// Namespaces specifies a list of literal namespace names. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespaces: Option>, + /// ServiceAccounts specifies a list of literal service-account names. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub serviceaccounts: Option>, +} + +/// ServiceAccountReference specifies a service account and namespace by their names. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct RoleBindingRestrictionServiceaccountrestrictionServiceaccounts { + /// Name is the name of the service account. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, +} + +/// UserRestriction matches against user subjects. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct RoleBindingRestrictionUserrestriction { + /// Groups specifies a list of literal group names. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub groups: Option>, + /// Selectors specifies a list of label selectors over user labels. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, + /// Users specifies a list of literal user names. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub users: Option>, +} + +/// A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct RoleBindingRestrictionUserrestrictionLabels { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct RoleBindingRestrictionUserrestrictionLabelsMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + diff --git a/kube-custom-resources-rs/src/cloud_network_openshift_io/mod.rs b/kube-custom-resources-rs/src/cloud_network_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/cloud_network_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/cloud_network_openshift_io/v1/cloudprivateipconfigs.rs b/kube-custom-resources-rs/src/cloud_network_openshift_io/v1/cloudprivateipconfigs.rs new file mode 100644 index 000000000..e852a46bf --- /dev/null +++ b/kube-custom-resources-rs/src/cloud_network_openshift_io/v1/cloudprivateipconfigs.rs @@ -0,0 +1,60 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/cloud.network.openshift.io/v1/cloudprivateipconfigs.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec is the definition of the desired private IP request. 
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "cloud.network.openshift.io", version = "v1", kind = "CloudPrivateIPConfig", plural = "cloudprivateipconfigs")] +#[kube(status = "CloudPrivateIPConfigStatus")] +#[kube(schema = "disabled")] +pub struct CloudPrivateIPConfigSpec { + /// node is the node name, as specified by the Kubernetes field: node.metadata.name + #[serde(default, skip_serializing_if = "Option::is_none")] + pub node: Option, +} + +/// status is the observed status of the desired private IP request. Read-only. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct CloudPrivateIPConfigStatus { + /// condition is the assignment condition of the private IP and its status + pub conditions: Vec, + /// node is the node name, as specified by the Kubernetes field: node.metadata.name + #[serde(default, skip_serializing_if = "Option::is_none")] + pub node: Option, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct CloudPrivateIPConfigStatusConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. + pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + pub reason: String, + /// status of the condition, one of True, False, Unknown. + pub status: CloudPrivateIPConfigStatusConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum CloudPrivateIPConfigStatusConditionsStatus { + True, + False, + Unknown, +} + diff --git a/kube-custom-resources-rs/src/cloud_network_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/cloud_network_openshift_io/v1/mod.rs new file mode 100644 index 000000000..2cffe7c64 --- /dev/null +++ b/kube-custom-resources-rs/src/cloud_network_openshift_io/v1/mod.rs @@ -0,0 +1 @@ +pub mod cloudprivateipconfigs; diff --git a/kube-custom-resources-rs/src/config_openshift_io/mod.rs b/kube-custom-resources-rs/src/config_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/apiservers.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/apiservers.rs new file mode 100644 index 000000000..bc8536994 --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/apiservers.rs @@ -0,0 +1,237 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/apiservers.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "APIServer", plural = "apiservers")] +#[kube(status = "APIServerStatus")] +#[kube(schema = "disabled")] +pub struct 
APIServerSpec { + /// additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "additionalCORSAllowedOrigins")] + pub additional_cors_allowed_origins: Option>, + /// audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub audit: Option, + /// clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - CA bundle. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientCA")] + pub client_ca: Option, + /// encryption allows the configuration of encryption of resources at the datastore layer. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub encryption: Option, + /// servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "servingCerts")] + pub serving_certs: Option, + /// tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. + /// If unset, a default (which may change between releases) is chosen. 
Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tlsSecurityProfile")] + pub tls_security_profile: Option, +} + +/// audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct APIServerAudit { + /// customRules specify profiles per group. These profile take precedence over the top-level profile field if they apply. They are evaluation from top to bottom and the first one that matches, applies. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "customRules")] + pub custom_rules: Option>, + /// profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules. + /// The following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody level). - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. + /// Warning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. 
If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly. + /// If unset, the 'Default' profile is used as the default. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub profile: Option, +} + +/// AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct APIServerAuditCustomRules { + /// group is a name of group a request user must be member of in order to this profile to apply. + pub group: String, + /// profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster. + /// The following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. + /// If unset, the 'Default' profile is used as the default. + pub profile: APIServerAuditCustomRulesProfile, +} + +/// AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum APIServerAuditCustomRulesProfile { + Default, + WriteRequestBodies, + AllRequestBodies, + None, +} + +/// audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum APIServerAuditProfile { + Default, + WriteRequestBodies, + AllRequestBodies, + None, +} + +/// clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - CA bundle. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct APIServerClientCa { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// encryption allows the configuration of encryption of resources at the datastore layer. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct APIServerEncryption { + /// type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices. + /// When encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is: + /// 1. secrets 2. configmaps 3. routes.route.openshift.io 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// encryption allows the configuration of encryption of resources at the datastore layer. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum APIServerEncryptionType { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "identity")] + Identity, + #[serde(rename = "aescbc")] + Aescbc, + #[serde(rename = "aesgcm")] + Aesgcm, +} + +/// servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct APIServerServingCerts { + /// namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namedCertificates")] + pub named_certificates: Option>, +} + +/// APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct APIServerServingCertsNamedCertificates { + /// names is a optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub names: Option>, + /// servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - TLS certificate. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "servingCertificate")] + pub serving_certificate: Option, +} + +/// servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - TLS certificate. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct APIServerServingCertsNamedCertificatesServingCertificate { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. +/// If unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct APIServerTlsSecurityProfile { + /// custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. 
An example custom profile looks like this: + /// ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1 + #[serde(default, skip_serializing_if = "Option::is_none")] + pub custom: Option, + /// intermediate is a TLS security profile based on: + /// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 + /// and looks like this (yaml): + /// ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2 + #[serde(default, skip_serializing_if = "Option::is_none")] + pub intermediate: Option, + /// modern is a TLS security profile based on: + /// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + /// and looks like this (yaml): + /// ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 + /// NOTE: Currently unsupported. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub modern: Option, + /// old is a TLS security profile based on: + /// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + /// and looks like this (yaml): + /// ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0 + #[serde(default, skip_serializing_if = "Option::is_none")] + pub old: Option, + /// type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on: + /// https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations + /// The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced. + /// Note that the Modern profile is currently not supported because it is not yet well adopted by common software libraries. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. 
An example custom profile looks like this: +/// ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1 +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct APIServerTlsSecurityProfileCustom { + /// ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml): + /// ciphers: - DES-CBC3-SHA + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ciphers: Option>, + /// minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): + /// minTLSVersion: TLSv1.1 + /// NOTE: currently the highest minTLSVersion allowed is VersionTLS12 + #[serde(default, skip_serializing_if = "Option::is_none", rename = "minTLSVersion")] + pub min_tls_version: Option, +} + +/// custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. 
An example custom profile looks like this: +/// ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1 +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum APIServerTlsSecurityProfileCustomMinTlsVersion { + #[serde(rename = "VersionTLS10")] + VersionTls10, + #[serde(rename = "VersionTLS11")] + VersionTls11, + #[serde(rename = "VersionTLS12")] + VersionTls12, + #[serde(rename = "VersionTLS13")] + VersionTls13, +} + +/// intermediate is a TLS security profile based on: +/// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 +/// and looks like this (yaml): +/// ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2 +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct APIServerTlsSecurityProfileIntermediate { +} + +/// modern is a TLS security profile based on: +/// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility +/// and looks like this (yaml): +/// ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 +/// NOTE: Currently unsupported. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct APIServerTlsSecurityProfileModern { +} + +/// old is a TLS security profile based on: +/// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility +/// and looks like this (yaml): +/// ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0 +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct APIServerTlsSecurityProfileOld { +} + +/// tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. +/// If unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum APIServerTlsSecurityProfileType { + Old, + Intermediate, + Modern, + Custom, +} + +/// status holds observed values from the cluster. They may not be overridden. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct APIServerStatus { +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/authentications.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/authentications.rs new file mode 100644 index 000000000..2e3260b3b --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/authentications.rs @@ -0,0 +1,90 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/authentications.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "Authentication", plural = "authentications")] +#[kube(status = "AuthenticationStatus")] +#[kube(schema = "disabled")] +pub struct AuthenticationSpec { + /// oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key "oauthMetadata" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "oauthMetadata")] + pub oauth_metadata: Option, + /// serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. This allows internal components to transition to use new service account issuer without service distruption. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceAccountIssuer")] + pub service_account_issuer: Option, + /// type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, + /// webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. + /// Can only be set if "Type" is set to "None". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "webhookTokenAuthenticator")] + pub webhook_token_authenticator: Option, + /// webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "webhookTokenAuthenticators")] + pub webhook_token_authenticators: Option>, +} + +/// oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. 
This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key "oauthMetadata" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AuthenticationOauthMetadata { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. +/// Can only be set if "Type" is set to "None". +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AuthenticationWebhookTokenAuthenticator { + /// kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config. + /// For further details, see: + /// https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + /// The key "kubeConfig" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. 
+ #[serde(rename = "kubeConfig")] + pub kube_config: AuthenticationWebhookTokenAuthenticatorKubeConfig, +} + +/// kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config. +/// For further details, see: +/// https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +/// The key "kubeConfig" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AuthenticationWebhookTokenAuthenticatorKubeConfig { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AuthenticationWebhookTokenAuthenticators { + /// kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key "kubeConfig" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "kubeConfig")] + pub kube_config: Option, +} + +/// kubeConfig contains kube config file data which describes how to access the remote webhook service. 
For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key "kubeConfig" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AuthenticationWebhookTokenAuthenticatorsKubeConfig { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AuthenticationStatus { + /// integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key "oauthMetadata" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "integratedOAuthMetadata")] + pub integrated_o_auth_metadata: Option, +} + +/// integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. 
This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key "oauthMetadata" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AuthenticationStatusIntegratedOAuthMetadata { + /// name is the metadata.name of the referenced config map + pub name: String, +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/clusteroperators.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/clusteroperators.rs new file mode 100644 index 000000000..fd91cf0ce --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/clusteroperators.rs @@ -0,0 +1,74 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/clusteroperators.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec holds configuration that could apply to any operator. 
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "ClusterOperator", plural = "clusteroperators")] +#[kube(status = "ClusterOperatorStatus")] +#[kube(schema = "disabled")] +pub struct ClusterOperatorSpec { +} + +/// status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterOperatorStatus { + /// conditions describes the state of the operator's managed and monitored components. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// extension contains any additional status information specific to the operator which owns this status object. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub extension: Option>, + /// relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces + #[serde(default, skip_serializing_if = "Option::is_none", rename = "relatedObjects")] + pub related_objects: Option>, + /// versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name "operator". An operator reports a new "operator" version when it has rolled out the new version to all of its operands. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub versions: Option>, +} + +/// ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterOperatorStatusConditions { + /// lastTransitionTime is the time of the last update to the current status property. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// reason is the CamelCase reason for the condition's current status. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// status of the condition, one of True, False, Unknown. + pub status: String, + /// type specifies the aspect reported by this condition. + #[serde(rename = "type")] + pub r#type: String, +} + +/// ObjectReference contains enough information to let you inspect or modify the referred object. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterOperatorStatusRelatedObjects { + /// group of the referent. + pub group: String, + /// name of the referent. + pub name: String, + /// namespace of the referent. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource of the referent. + pub resource: String, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterOperatorStatusVersions { + /// name is the name of the particular operand this version is for. It usually matches container images, not operators. + pub name: String, + /// version indicates which version of a particular operand is currently being managed. It must always match the Available operand. 
If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout 1.1.0 + pub version: String, +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/clusterversions.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/clusterversions.rs new file mode 100644 index 000000000..f383963cf --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/clusterversions.rs @@ -0,0 +1,324 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/clusterversions.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "ClusterVersion", plural = "clusterversions")] +#[kube(status = "ClusterVersionStatus")] +#[kube(schema = "disabled")] +pub struct ClusterVersionSpec { + /// capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub capabilities: Option, + /// channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub channel: Option, + /// clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field. 
+ #[serde(rename = "clusterID")] + pub cluster_id: String, + /// desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. + /// Some of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error. + /// If an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "desiredUpdate")] + pub desired_update: Option, + /// overrides is list of overides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub overrides: Option>, + /// upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub upstream: Option, +} + +/// capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionCapabilities { + /// additionalEnabledCapabilities extends the set of managed capabilities beyond the baseline defined in baselineCapabilitySet. The default is an empty set. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "additionalEnabledCapabilities")] + pub additional_enabled_capabilities: Option>, + /// baselineCapabilitySet selects an initial set of optional capabilities to enable, which can be extended via additionalEnabledCapabilities. If unset, the cluster will choose a default, and the default may change over time. The current default is vCurrent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "baselineCapabilitySet")] + pub baseline_capability_set: Option, +} + +/// capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ClusterVersionCapabilitiesBaselineCapabilitySet { + None, + #[serde(rename = "v4.11")] + V411, + #[serde(rename = "v4.12")] + V412, + #[serde(rename = "v4.13")] + V413, + #[serde(rename = "v4.14")] + V414, + #[serde(rename = "v4.15")] + V415, + #[serde(rename = "vCurrent")] + VCurrent, +} + +/// desiredUpdate is an optional field that indicates the desired value of the cluster version. 
Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. +/// Some of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error. +/// If an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionDesiredUpdate { + /// architecture is an optional field that indicates the desired value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. architecture can only be set to Multi thereby only allowing updates from single to multi architecture. If architecture is set, image cannot be set and version must be set. 
Valid values are 'Multi' and empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub architecture: Option, + /// force allows an administrator to update to an image that has failed verification or upgradeable checks. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub force: Option, + /// image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, version is ignored. When image is set, version should be empty. When image is set, architecture cannot be specified. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub image: Option, + /// version is a semantic version identifying the update version. version is ignored if image is specified and required if architecture is specified. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. +/// Some of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. 3. image is specified, version is not specified, architecture is specified. 
API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error. +/// If an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ClusterVersionDesiredUpdateArchitecture { + Multi, + #[serde(rename = "")] + KopiumEmpty, +} + +/// ComponentOverride allows overriding cluster version operator's behavior for a component. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionOverrides { + /// group identifies the API group that the kind is in. + pub group: String, + /// kind indentifies which object to override. + pub kind: String, + /// name is the component's name. + pub name: String, + /// namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty. + pub namespace: String, + /// unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false + pub unmanaged: bool, +} + +/// status contains information about the available updates and any in-progress updates. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionStatus { + /// availableUpdates contains updates recommended for this cluster. 
Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified. + #[serde(rename = "availableUpdates")] + pub available_updates: Vec, + /// capabilities describes the state of optional, core cluster components. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub capabilities: Option, + /// conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "conditionalUpdates")] + pub conditional_updates: Option>, + /// conditions provides information about the cluster version. The condition "Available" is set to true if the desiredUpdate has been reached. The condition "Progressing" is set to true if an update is being applied. The condition "Degraded" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag. + pub desired: ClusterVersionStatusDesired, + /// history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. 
The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub history: Option>, + /// observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version. + #[serde(rename = "observedGeneration")] + pub observed_generation: i64, + /// versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only. + #[serde(rename = "versionHash")] + pub version_hash: String, +} + +/// Release represents an OpenShift release image and associated metadata. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionStatusAvailableUpdates { + /// channels is the set of Cincinnati channels to which the release currently belongs. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub channels: Option>, + /// image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub image: Option, + /// url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub url: Option, + /// version is a semantic version identifying the update version. 
When this field is part of spec, version is optional if image is specified. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// capabilities describes the state of optional, core cluster components. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionStatusCapabilities { + /// enabledCapabilities lists all the capabilities that are currently managed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "enabledCapabilities")] + pub enabled_capabilities: Option>, + /// knownCapabilities lists all the capabilities known to the current cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "knownCapabilities")] + pub known_capabilities: Option>, +} + +/// ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionStatusConditionalUpdates { + /// conditions represents the observations of the conditional update's current status. Known types are: * Evaluating, for whether the cluster-version operator will attempt to evaluate any risks[].matchingRules. * Recommended, for whether the update is recommended for the current cluster. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// release is the target of the update. + pub release: ClusterVersionStatusConditionalUpdatesRelease, + /// risks represents the range of issues associated with updating to the target release. The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update. + pub risks: Vec, +} + +/// Condition contains details for one aspect of the current state of this API Resource. 
--- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionStatusConditionalUpdatesConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. + pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + pub reason: String, + /// status of the condition, one of True, False, Unknown. 
+ pub status: ClusterVersionStatusConditionalUpdatesConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ClusterVersionStatusConditionalUpdatesConditionsStatus { + True, + False, + Unknown, +} + +/// release is the target of the update. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionStatusConditionalUpdatesRelease { + /// channels is the set of Cincinnati channels to which the release currently belongs. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub channels: Option>, + /// image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub image: Option, + /// url contains information about this release. 
This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub url: Option, + /// version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionStatusConditionalUpdatesRisks { + /// matchingRules is a slice of conditions for deciding which clusters match the risk and which do not. The slice is ordered by decreasing precedence. The cluster-version operator will walk the slice in order, and stop after the first it can successfully evaluate. If no condition can be successfully evaluated, the update will not be recommended. + #[serde(rename = "matchingRules")] + pub matching_rules: Vec, + /// message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + pub message: String, + /// name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state. + pub name: String, + /// url contains information about this risk. + pub url: String, +} + +/// ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionStatusConditionalUpdatesRisksMatchingRules { + /// promQL represents a cluster condition based on PromQL. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub promql: Option, + /// type represents the cluster-condition type. This defines the members and semantics of any additional properties. + #[serde(rename = "type")] + pub r#type: ClusterVersionStatusConditionalUpdatesRisksMatchingRulesType, +} + +/// promQL represents a cluster condition based on PromQL. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionStatusConditionalUpdatesRisksMatchingRulesPromql { + /// PromQL is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures. + pub promql: String, +} + +/// ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ClusterVersionStatusConditionalUpdatesRisksMatchingRulesType { + Always, + #[serde(rename = "PromQL")] + PromQl, +} + +/// ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionStatusConditions { + /// lastTransitionTime is the time of the last update to the current status property. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// reason is the CamelCase reason for the condition's current status. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// status of the condition, one of True, False, Unknown. + pub status: String, + /// type specifies the aspect reported by this condition. + #[serde(rename = "type")] + pub r#type: String, +} + +/// desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionStatusDesired { + /// channels is the set of Cincinnati channels to which the release currently belongs. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub channels: Option>, + /// image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub image: Option, + /// url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub url: Option, + /// version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// UpdateHistory is a single attempted update to the cluster. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterVersionStatusHistory { + /// acceptedRisks records risks which were accepted to initiate the update. For example, it may menition an Upgradeable=False or missing signature that was overriden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "acceptedRisks")] + pub accepted_risks: Option, + /// completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update). + #[serde(rename = "completionTime")] + pub completion_time: String, + /// image is a container image location that contains the update. This value is always populated. + pub image: String, + /// startedTime is the time at which the update was started. + #[serde(rename = "startedTime")] + pub started_time: String, + /// state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied). + pub state: String, + /// verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted. + pub verified: bool, + /// version is a semantic version identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/consoles.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/consoles.rs new file mode 100644 index 000000000..ef4085e2e --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/consoles.rs @@ -0,0 +1,34 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/consoles.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "Console", plural = "consoles")] +#[kube(status = "ConsoleStatus")] +#[kube(schema = "disabled")] +pub struct ConsoleSpec { + /// ConsoleAuthentication defines a list of optional configuration for console authentication. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub authentication: Option, +} + +/// ConsoleAuthentication defines a list of optional configuration for console authentication. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsoleAuthentication { + /// An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page. This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user's token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "logoutRedirect")] + pub logout_redirect: Option, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsoleStatus { + /// The URL for the console. This will be derived from the host for the route that is created for the console. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "consoleURL")] + pub console_url: Option, +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/dnses.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/dnses.rs new file mode 100644 index 000000000..10c4f31a0 --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/dnses.rs @@ -0,0 +1,121 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/dnses.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "DNS", plural = "dnses")] +#[kube(status = "DNSStatus")] +#[kube(schema = "disabled")] +pub struct DNSSpec { + /// baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base. + /// For example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`. + /// Once set, this field cannot be changed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "baseDomain")] + pub base_domain: Option, + /// platform holds configuration specific to the underlying infrastructure provider for DNS. 
When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub platform: Option, + /// privateZone is the location where all the DNS records that are only available internally to the cluster exist. + /// If this field is nil, no private records should be created. + /// Once set, this field cannot be changed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "privateZone")] + pub private_zone: Option, + /// publicZone is the location where all the DNS records that are publicly accessible to the internet exist. + /// If this field is nil, no public records should be created. + /// Once set, this field cannot be changed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "publicZone")] + pub public_zone: Option, +} + +/// platform holds configuration specific to the underlying infrastructure provider for DNS. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSPlatform { + /// aws contains DNS configuration specific to the Amazon Web Services cloud provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub aws: Option, + /// type is the underlying infrastructure provider for the cluster. Allowed values: "", "AWS". + /// Individual components may not support all platforms, and must handle unrecognized platforms with best-effort defaults. + #[serde(rename = "type")] + pub r#type: DNSPlatformType, +} + +/// aws contains DNS configuration specific to the Amazon Web Services cloud provider. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSPlatformAws { + /// privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "privateZoneIAMRole")] + pub private_zone_iam_role: Option, +} + +/// platform holds configuration specific to the underlying infrastructure provider for DNS. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DNSPlatformType { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "AWS")] + Aws, + Azure, + BareMetal, + #[serde(rename = "GCP")] + Gcp, + Libvirt, + OpenStack, + None, + VSphere, + #[serde(rename = "oVirt")] + OVirt, + #[serde(rename = "IBMCloud")] + IbmCloud, + KubeVirt, + EquinixMetal, + #[serde(rename = "PowerVS")] + PowerVs, + AlibabaCloud, + Nutanix, + External, +} + +/// privateZone is the location where all the DNS records that are only available internally to the cluster exist. +/// If this field is nil, no private records should be created. +/// Once set, this field cannot be changed. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSPrivateZone { + /// id is the identifier that can be used to find the DNS hosted zone. + /// on AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3]. 
+ /// [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get + #[serde(default, skip_serializing_if = "Option::is_none")] + pub id: Option, + /// tags can be used to query the DNS hosted zone. + /// on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, + /// [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tags: Option>, +} + +/// publicZone is the location where all the DNS records that are publicly accessible to the internet exist. +/// If this field is nil, no public records should be created. +/// Once set, this field cannot be changed. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSPublicZone { + /// id is the identifier that can be used to find the DNS hosted zone. + /// on AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3]. + /// [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get + #[serde(default, skip_serializing_if = "Option::is_none")] + pub id: Option, + /// tags can be used to query the DNS hosted zone. 
+ /// on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, + /// [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tags: Option>, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSStatus { +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/featuregates.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/featuregates.rs new file mode 100644 index 000000000..b051ec7c0 --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/featuregates.rs @@ -0,0 +1,99 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/featuregates.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "FeatureGate", plural = "featuregates")] +#[kube(status = "FeatureGateStatus")] +#[kube(schema = "disabled")] +pub struct FeatureGateSpec { + /// customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal "CustomNoUpgrade" must be set to use this field. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "customNoUpgrade")] + pub custom_no_upgrade: Option, + /// featureSet changes the list of features in the cluster. 
The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "featureSet")] + pub feature_set: Option, +} + +/// customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal "CustomNoUpgrade" must be set to use this field. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct FeatureGateCustomNoUpgrade { + /// disabled is a list of all feature gates that you want to force off + #[serde(default, skip_serializing_if = "Option::is_none")] + pub disabled: Option>, + /// enabled is a list of all feature gates that you want to force on + #[serde(default, skip_serializing_if = "Option::is_none")] + pub enabled: Option>, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct FeatureGateStatus { + /// conditions represent the observations of the current state. Known .status.conditions.type are: "DeterminationDegraded" + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion. Operators other than the CVO and cluster-config-operator, must read the .status.featureGates, locate the version they are managing, find the enabled/disabled featuregates and make the operand and operator match. The enabled/disabled values for a particular version may change during the life of the cluster as various .spec.featureSet values are selected. 
Operators may choose to restart their processes to pick up these changes, but remembering past enable/disable lists is beyond the scope of this API and is the responsibility of individual operators. Only featureGates with .version in the ClusterVersion.status will be present in this list. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "featureGates")] + pub feature_gates: Option>, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct FeatureGateStatusConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. + pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + pub reason: String, + /// status of the condition, one of True, False, Unknown. + pub status: FeatureGateStatusConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum FeatureGateStatusConditionsStatus { + True, + False, + Unknown, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct FeatureGateStatusFeatureGates { + /// disabled is a list of all feature gates that are disabled in the cluster for the named version. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub disabled: Option>, + /// enabled is a list of all feature gates that are enabled in the cluster for the named version. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub enabled: Option>, + /// version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field. + pub version: String, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct FeatureGateStatusFeatureGatesDisabled { + /// name is the name of the FeatureGate. + pub name: String, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct FeatureGateStatusFeatureGatesEnabled { + /// name is the name of the FeatureGate. + pub name: String, +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/imagedigestmirrorsets.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/imagedigestmirrorsets.rs new file mode 100644 index 000000000..d7db8f5f1 --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/imagedigestmirrorsets.rs @@ -0,0 +1,46 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/imagedigestmirrorsets.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "ImageDigestMirrorSet", plural = "imagedigestmirrorsets")] +#[kube(status = "ImageDigestMirrorSetStatus")] +#[kube(schema = "disabled")] +pub struct ImageDigestMirrorSetSpec { + /// imageDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. 
The image pull specification provided to the pod will be compared to the source locations described in imageDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using tag specification, users should configure a list of mirrors using "ImageTagMirrorSet" CRD. + /// If the image pull specification matches the repository of "source" in multiple imagedigestmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the "source", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. Each “source” repository is treated independently; configurations for different “source” repositories don’t interact. + /// If the "mirrors" is not specified, the image will continue to be pulled from the specified repository in the pull spec. + /// When multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. Users who want to use a specific order of mirrors, should configure them into one list of mirrors using the expected order. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "imageDigestMirrors")] + pub image_digest_mirrors: Option>, +} + +/// ImageDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImageDigestMirrorSetImageDigestMirrors { + /// mirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors. If unset, the image will continue to be pulled from the the repository in the pull spec. sourcePolicy is valid configuration only when one or more mirrors are in the mirror list. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mirrorSourcePolicy")] + pub mirror_source_policy: Option, + /// mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their digests. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy" Other cluster configuration, including (but not limited to) other imageDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. 
"mirrors" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mirrors: Option>, + /// source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname e.g. docker.io. quay.io, or registry.redhat.io, will match the image pull specification of corressponding registry. "source" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table + pub source: String, +} + +/// ImageDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ImageDigestMirrorSetImageDigestMirrorsMirrorSourcePolicy { + NeverContactSource, + AllowContactingSource, +} + +/// status contains the observed state of the resource. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImageDigestMirrorSetStatus { +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/images.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/images.rs new file mode 100644 index 000000000..aea51219b --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/images.rs @@ -0,0 +1,75 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/images.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "Image", plural = "images")] +#[kube(status = "ImageStatus")] +#[kube(schema = "disabled")] +pub struct ImageSpec { + /// additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "additionalTrustedCA")] + pub additional_trusted_ca: Option, + /// allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowedRegistriesForImport")] + pub allowed_registries_for_import: Option>, + /// externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in "hostname[:port]" format. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "externalRegistryHostnames")] + pub external_registry_hostnames: Option>, + /// registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "registrySources")] + pub registry_sources: Option, +} + +/// additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImageAdditionalTrustedCa { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImageAllowedRegistriesForImport { + /// domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "domainName")] + pub domain_name: Option, + /// insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub insecure: Option, +} + +/// registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImageRegistrySources { + /// allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied. + /// Only one of BlockedRegistries or AllowedRegistries may be set. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowedRegistries")] + pub allowed_registries: Option>, + /// blockedRegistries cannot be used for image pull and push actions. All other registries are permitted. + /// Only one of BlockedRegistries or AllowedRegistries may be set. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "blockedRegistries")] + pub blocked_registries: Option>, + /// containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified domains in their pull specs. Registries will be searched in the order provided in the list. Note: this search list only works with the container runtime, i.e CRI-O. Will NOT work with builds or imagestream imports. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "containerRuntimeSearchRegistries")] + pub container_runtime_search_registries: Option>, + /// insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "insecureRegistries")] + pub insecure_registries: Option>, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImageStatus { + /// externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in "hostname[:port]" format. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "externalRegistryHostnames")] + pub external_registry_hostnames: Option>, + /// internalRegistryHostname sets the hostname for the default internal image registry. The value must be in "hostname[:port]" format. This value is set by the image registry operator which controls the internal registry hostname. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "internalRegistryHostname")] + pub internal_registry_hostname: Option, +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/imagetagmirrorsets.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/imagetagmirrorsets.rs new file mode 100644 index 000000000..db039951f --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/imagetagmirrorsets.rs @@ -0,0 +1,46 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/imagetagmirrorsets.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = 
"ImageTagMirrorSet", plural = "imagetagmirrorsets")] +#[kube(status = "ImageTagMirrorSetStatus")] +#[kube(schema = "disabled")] +pub struct ImageTagMirrorSetSpec { + /// imageTagMirrors allows images referenced by image tags in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageTagMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using digest specification only, users should configure a list of mirrors using "ImageDigestMirrorSet" CRD. + /// If the image pull specification matches the repository of "source" in multiple imagetagmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the "source", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. Each “source” repository is treated independently; configurations for different “source” repositories don’t interact. + /// If the "mirrors" is not specified, the image will continue to be pulled from the specified repository in the pull spec. + /// When multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. Users who want to use a deterministic order of mirrors, should configure them into one list of mirrors using the expected order. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "imageTagMirrors")] + pub image_tag_mirrors: Option>, +} + +/// ImageTagMirrors holds cluster-wide information about how to handle mirrors in the registries config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImageTagMirrorSetImageTagMirrors { + /// mirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors. If unset, the image will continue to be pulled from the repository in the pull spec. sourcePolicy is valid configuration only when one or more mirrors are in the mirror list. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mirrorSourcePolicy")] + pub mirror_source_policy: Option, + /// mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their tags. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Configuring a list of mirrors using "ImageDigestMirrorSet" CRD and forcing digest-pulls for mirrors avoids that issue. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy". 
Other cluster configuration, including (but not limited to) other imageTagMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. "mirrors" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mirrors: Option>, + /// source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname e.g. docker.io. quay.io, or registry.redhat.io, will match the image pull specification of corressponding registry. "source" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table + pub source: String, +} + +/// ImageTagMirrors holds cluster-wide information about how to handle mirrors in the registries config. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ImageTagMirrorSetImageTagMirrorsMirrorSourcePolicy { + NeverContactSource, + AllowContactingSource, +} + +/// status contains the observed state of the resource. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImageTagMirrorSetStatus { +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/infrastructures.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/infrastructures.rs new file mode 100644 index 000000000..b7897f978 --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/infrastructures.rs @@ -0,0 +1,964 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/infrastructures.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "Infrastructure", plural = "infrastructures")] +#[kube(status = "InfrastructureStatus")] +#[kube(schema = "disabled")] +pub struct InfrastructureSpec { + /// cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config. + /// cloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config. The controller generates a ConfigMap `kube-cloud-config` in `openshift-config-managed` namespace with the kube cloud config is stored in `cloud.conf` key. All the clients are expected to use the generated ConfigMap only. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloudConfig")] + pub cloud_config: Option, + /// platformSpec holds desired information specific to the underlying infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "platformSpec")] + pub platform_spec: Option, +} + +/// cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config. +/// cloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config. The controller generates a ConfigMap `kube-cloud-config` in `openshift-config-managed` namespace with the kube cloud config is stored in `cloud.conf` key. All the clients are expected to use the generated ConfigMap only. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureCloudConfig { + /// Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +/// platformSpec holds desired information specific to the underlying infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpec { + /// AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "alibabaCloud")] + pub alibaba_cloud: Option, + /// AWS contains settings specific to the Amazon Web Services infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub aws: Option, + /// Azure contains settings specific to the Azure infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub azure: Option, + /// BareMetal contains settings specific to the BareMetal platform. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub baremetal: Option, + /// EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "equinixMetal")] + pub equinix_metal: Option, + /// ExternalPlatformType represents generic infrastructure provider. Platform-specific components should be supplemented separately. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub external: Option, + /// GCP contains settings specific to the Google Cloud Platform infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gcp: Option, + /// IBMCloud contains settings specific to the IBMCloud infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ibmcloud: Option, + /// Kubevirt contains settings specific to the kubevirt infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kubevirt: Option, + /// Nutanix contains settings specific to the Nutanix infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub nutanix: Option, + /// OpenStack contains settings specific to the OpenStack infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub openstack: Option, + /// Ovirt contains settings specific to the oVirt infrastructure provider. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub ovirt: Option, + /// PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub powervs: Option, + /// type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, + /// VSphere contains settings specific to the VSphere infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub vsphere: Option, +} + +/// AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecAlibabaCloud { +} + +/// AWS contains settings specific to the Amazon Web Services infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecAws { + /// serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceEndpoints")] + pub service_endpoints: Option>, +} + +/// AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecAwsServiceEndpoints { + /// name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub url: Option, +} + +/// Azure contains settings specific to the Azure infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecAzure { +} + +/// BareMetal contains settings specific to the BareMetal platform. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecBaremetal { + /// apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIPs")] + pub api_server_internal_i_ps: Option>, + /// ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. 
In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIPs")] + pub ingress_i_ps: Option>, + /// machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "machineNetworks")] + pub machine_networks: Option>, +} + +/// EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecEquinixMetal { +} + +/// ExternalPlatformType represents generic infrastructure provider. Platform-specific components should be supplemented separately. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecExternal { + /// PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "platformName")] + pub platform_name: Option, +} + +/// GCP contains settings specific to the Google Cloud Platform infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecGcp { +} + +/// IBMCloud contains settings specific to the IBMCloud infrastructure provider. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecIbmcloud { +} + +/// Kubevirt contains settings specific to the kubevirt infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecKubevirt { +} + +/// Nutanix contains settings specific to the Nutanix infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecNutanix { + /// failureDomains configures failure domains information for the Nutanix platform. When set, the failure domains defined here may be used to spread Machines across prism element clusters to improve fault tolerance of the cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomains")] + pub failure_domains: Option>, + /// prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. + #[serde(rename = "prismCentral")] + pub prism_central: InfrastructurePlatformSpecNutanixPrismCentral, + /// prismElements holds one or more endpoint address and port data to access the Nutanix Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) spread over multiple Prism Elements (clusters) of the Prism Central. + #[serde(rename = "prismElements")] + pub prism_elements: Vec, +} + +/// NutanixFailureDomain configures failure domain information for the Nutanix platform. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecNutanixFailureDomains { + /// cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API. + pub cluster: InfrastructurePlatformSpecNutanixFailureDomainsCluster, + /// name defines the unique name of a failure domain. Name is required and must be at most 64 characters in length. It must consist of only lower case alphanumeric characters and hyphens (-). It must start and end with an alphanumeric character. This value is arbitrary and is used to identify the failure domain within the platform. + pub name: String, + /// subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API. + pub subnets: Vec, +} + +/// cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecNutanixFailureDomainsCluster { + /// name is the resource name in the PC. It cannot be empty if the type is Name. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// type is the identifier type to use for this resource. + #[serde(rename = "type")] + pub r#type: InfrastructurePlatformSpecNutanixFailureDomainsClusterType, + /// uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub uuid: Option, +} + +/// cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InfrastructurePlatformSpecNutanixFailureDomainsClusterType { + #[serde(rename = "UUID")] + Uuid, + Name, +} + +/// NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.) +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecNutanixFailureDomainsSubnets { + /// name is the resource name in the PC. It cannot be empty if the type is Name. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// type is the identifier type to use for this resource. + #[serde(rename = "type")] + pub r#type: InfrastructurePlatformSpecNutanixFailureDomainsSubnetsType, + /// uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uuid: Option, +} + +/// NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.) +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InfrastructurePlatformSpecNutanixFailureDomainsSubnetsType { + #[serde(rename = "UUID")] + Uuid, + Name, +} + +/// prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecNutanixPrismCentral { + /// address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) + pub address: String, + /// port is the port number to access the Nutanix Prism Central or Element (cluster) + pub port: i32, +} + +/// NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster) +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecNutanixPrismElements { + /// endpoint holds the endpoint address and port data of the Prism Element (cluster). When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. + pub endpoint: InfrastructurePlatformSpecNutanixPrismElementsEndpoint, + /// name is the name of the Prism Element (cluster). This value will correspond with the cluster field configured on other resources (eg Machines, PVCs, etc). + pub name: String, +} + +/// endpoint holds the endpoint address and port data of the Prism Element (cluster). When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecNutanixPrismElementsEndpoint { + /// address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) + pub address: String, + /// port is the port number to access the Nutanix Prism Central or Element (cluster) + pub port: i32, +} + +/// OpenStack contains settings specific to the OpenStack infrastructure provider. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecOpenstack { + /// apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIPs")] + pub api_server_internal_i_ps: Option>, + /// ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIPs")] + pub ingress_i_ps: Option>, + /// machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "machineNetworks")] + pub machine_networks: Option>, +} + +/// Ovirt contains settings specific to the oVirt infrastructure provider. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecOvirt { +} + +/// PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecPowervs { + /// serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceEndpoints")] + pub service_endpoints: Option>, +} + +/// PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecPowervsServiceEndpoints { + /// name is the name of the Power VS service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + pub name: String, + /// url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + pub url: String, +} + +/// platformSpec holds desired information specific to the underlying infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InfrastructurePlatformSpecType { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "AWS")] + Aws, + Azure, + BareMetal, + #[serde(rename = "GCP")] + Gcp, + Libvirt, + OpenStack, + None, + VSphere, + #[serde(rename = "oVirt")] + OVirt, + #[serde(rename = "IBMCloud")] + IbmCloud, + KubeVirt, + EquinixMetal, + #[serde(rename = "PowerVS")] + PowerVs, + AlibabaCloud, + Nutanix, + External, +} + +/// VSphere contains settings specific to the VSphere infrastructure provider. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecVsphere { + /// apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIPs")] + pub api_server_internal_i_ps: Option>, + /// failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomains")] + pub failure_domains: Option>, + /// ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIPs")] + pub ingress_i_ps: Option>, + /// machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "machineNetworks")] + pub machine_networks: Option>, + /// nodeNetworking contains the definition of internal and external network constraints for assigning the node's networking. If this field is omitted, networking defaults to the legacy address selection behavior which is to only support a single address and return the first one found. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeNetworking")] + pub node_networking: Option, + /// vcenters holds the connection details for services to communicate with vCenter. Currently, only a single vCenter is supported. --- + #[serde(default, skip_serializing_if = "Option::is_none")] + pub vcenters: Option>, +} + +/// VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecVsphereFailureDomains { + /// name defines the arbitrary but unique name of a failure domain. + pub name: String, + /// region defines the name of a region tag that will be attached to a vCenter datacenter. The tag category in vCenter must be named openshift-region. + pub region: String, + /// server is the fully-qualified domain name or the IP address of the vCenter server. --- + pub server: String, + /// Topology describes a given failure domain using vSphere constructs + pub topology: InfrastructurePlatformSpecVsphereFailureDomainsTopology, + /// zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone. 
+ pub zone: String, +} + +/// Topology describes a given failure domain using vSphere constructs +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecVsphereFailureDomainsTopology { + /// computeCluster the absolute path of the vCenter cluster in which virtual machine will be located. The absolute path is of the form //host/. The maximum length of the path is 2048 characters. + #[serde(rename = "computeCluster")] + pub compute_cluster: String, + /// datacenter is the name of vCenter datacenter in which virtual machines will be located. The maximum length of the datacenter name is 80 characters. + pub datacenter: String, + /// datastore is the absolute path of the datastore in which the virtual machine is located. The absolute path is of the form //datastore/ The maximum length of the path is 2048 characters. + pub datastore: String, + /// folder is the absolute path of the folder where virtual machines are located. The absolute path is of the form //vm/. The maximum length of the path is 2048 characters. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub folder: Option, + /// networks is the list of port group network names within this failure domain. Currently, we only support a single interface per RHCOS virtual machine. The available networks (port groups) can be listed using `govc ls 'network/*'` The single interface should be the absolute path of the form //network/. + pub networks: Vec, + /// resourcePool is the absolute path of the resource pool where virtual machines will be created. The absolute path is of the form //host//Resources/. The maximum length of the path is 2048 characters. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourcePool")] + pub resource_pool: Option, +} + +/// nodeNetworking contains the definition of internal and external network constraints for assigning the node's networking. 
If this field is omitted, networking defaults to the legacy address selection behavior which is to only support a single address and return the first one found. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecVsphereNodeNetworking { + /// external represents the network configuration of the node that is externally routable. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub external: Option, + /// internal represents the network configuration of the node that is routable only within the cluster. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub internal: Option, +} + +/// external represents the network configuration of the node that is externally routable. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecVsphereNodeNetworkingExternal { + /// excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting the IP address from the VirtualMachine's VM for use in the status.addresses fields. --- + #[serde(default, skip_serializing_if = "Option::is_none", rename = "excludeNetworkSubnetCidr")] + pub exclude_network_subnet_cidr: Option>, + /// network VirtualMachine's VM Network names that will be used to when searching for status.addresses fields. Note that if internal.networkSubnetCIDR and external.networkSubnetCIDR are not set, then the vNIC associated to this network must only have a single IP address assigned to it. The available networks (port groups) can be listed using `govc ls 'network/*'` + #[serde(default, skip_serializing_if = "Option::is_none")] + pub network: Option, + /// networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs that will be used in respective status.addresses fields. 
--- + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkSubnetCidr")] + pub network_subnet_cidr: Option>, +} + +/// internal represents the network configuration of the node that is routable only within the cluster. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecVsphereNodeNetworkingInternal { + /// excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting the IP address from the VirtualMachine's VM for use in the status.addresses fields. --- + #[serde(default, skip_serializing_if = "Option::is_none", rename = "excludeNetworkSubnetCidr")] + pub exclude_network_subnet_cidr: Option>, + /// network VirtualMachine's VM Network names that will be used to when searching for status.addresses fields. Note that if internal.networkSubnetCIDR and external.networkSubnetCIDR are not set, then the vNIC associated to this network must only have a single IP address assigned to it. The available networks (port groups) can be listed using `govc ls 'network/*'` + #[serde(default, skip_serializing_if = "Option::is_none")] + pub network: Option, + /// networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs that will be used in respective status.addresses fields. --- + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkSubnetCidr")] + pub network_subnet_cidr: Option>, +} + +/// VSpherePlatformVCenterSpec stores the vCenter connection fields. This is used by the vSphere CCM. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructurePlatformSpecVsphereVcenters { + /// The vCenter Datacenters in which the RHCOS vm guests are located. This field will be used by the Cloud Controller Manager. Each datacenter listed here should be used within a topology. + pub datacenters: Vec, + /// port is the TCP port that will be used to communicate to the vCenter endpoint. 
When omitted, this means the user has no opinion and it is up to the platform to choose a sensible default, which is subject to change over time. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub port: Option, + /// server is the fully-qualified domain name or the IP address of the vCenter server. --- + pub server: String, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatus { + /// apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalURI")] + pub api_server_internal_uri: Option, + /// apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerURL")] + pub api_server_url: Option, + /// controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "controlPlaneTopology")] + pub control_plane_topology: Option, + /// cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. 
CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. Valid values are "None" and "AllNodes". When omitted, the default value is "None". The default value of "None" indicates that no nodes will be setup with CPU partitioning. The "AllNodes" value indicates that all nodes have been setup with CPU partitioning, and can then be further configured via the PerformanceProfile API. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cpuPartitioning")] + pub cpu_partitioning: Option, + /// etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "etcdDiscoveryDomain")] + pub etcd_discovery_domain: Option, + /// infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "infrastructureName")] + pub infrastructure_name: Option, + /// infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation NOTE: External topology mode is not applicable for this field. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "infrastructureTopology")] + pub infrastructure_topology: Option, + /// platform is the underlying infrastructure provider for the cluster. + /// Deprecated: Use platformStatus.type instead. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub platform: Option, + /// platformStatus holds status information specific to the underlying infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "platformStatus")] + pub platform_status: Option, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InfrastructureStatusControlPlaneTopology { + HighlyAvailable, + SingleReplica, + External, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InfrastructureStatusCpuPartitioning { + None, + AllNodes, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InfrastructureStatusInfrastructureTopology { + HighlyAvailable, + SingleReplica, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InfrastructureStatusPlatform { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "AWS")] + Aws, + Azure, + BareMetal, + #[serde(rename = "GCP")] + Gcp, + Libvirt, + OpenStack, + None, + VSphere, + #[serde(rename = "oVirt")] + OVirt, + #[serde(rename = "IBMCloud")] + IbmCloud, + KubeVirt, + EquinixMetal, + #[serde(rename = "PowerVS")] + PowerVs, + AlibabaCloud, + Nutanix, + External, +} + +/// platformStatus holds status information specific to the underlying infrastructure provider. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatus { + /// AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "alibabaCloud")] + pub alibaba_cloud: Option, + /// AWS contains settings specific to the Amazon Web Services infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub aws: Option, + /// Azure contains settings specific to the Azure infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub azure: Option, + /// BareMetal contains settings specific to the BareMetal platform. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub baremetal: Option, + /// EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "equinixMetal")] + pub equinix_metal: Option, + /// External contains settings specific to the generic External infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub external: Option, + /// GCP contains settings specific to the Google Cloud Platform infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gcp: Option, + /// IBMCloud contains settings specific to the IBMCloud infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ibmcloud: Option, + /// Kubevirt contains settings specific to the kubevirt infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kubevirt: Option, + /// Nutanix contains settings specific to the Nutanix infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub nutanix: Option, + /// OpenStack contains settings specific to the OpenStack infrastructure provider. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub openstack: Option, + /// Ovirt contains settings specific to the oVirt infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ovirt: Option, + /// PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub powervs: Option, + /// type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. + /// This value will be synced with to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, + /// VSphere contains settings specific to the VSphere infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub vsphere: Option, +} + +/// AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusAlibabaCloud { + /// region specifies the region for Alibaba Cloud resources created for the cluster. + pub region: String, + /// resourceGroupID is the ID of the resource group for the cluster. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceGroupID")] + pub resource_group_id: Option, + /// resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceTags")] + pub resource_tags: Option>, +} + +/// AlibabaCloudResourceTag is the set of tags to add to apply to resources. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusAlibabaCloudResourceTags { + /// key is the key of the tag. + pub key: String, + /// value is the value of the tag. + pub value: String, +} + +/// AWS contains settings specific to the Amazon Web Services infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusAws { + /// region holds the default AWS region for new AWS resources created by the cluster. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub region: Option, + /// resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceTags")] + pub resource_tags: Option>, + /// ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceEndpoints")] + pub service_endpoints: Option>, +} + +/// AWSResourceTag is a tag to apply to AWS resources created for the cluster. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusAwsResourceTags { + /// key is the key of the tag + pub key: String, + /// value is the value of the tag. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services. + pub value: String, +} + +/// AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusAwsServiceEndpoints { + /// name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub url: Option, +} + +/// Azure contains settings specific to the Azure infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusAzure { + /// armEndpoint specifies a URL to use for resource management in non-soverign clouds such as Azure Stack. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "armEndpoint")] + pub arm_endpoint: Option, + /// cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloudName")] + pub cloud_name: Option, + /// networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is same as ResourceGroupName. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkResourceGroupName")] + pub network_resource_group_name: Option, + /// resourceGroupName is the Resource Group for new Azure resources created for the cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceGroupName")] + pub resource_group_name: Option, + /// resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceTags")] + pub resource_tags: Option>, +} + +/// Azure contains settings specific to the Azure infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InfrastructureStatusPlatformStatusAzureCloudName { + #[serde(rename = "")] + KopiumEmpty, + AzurePublicCloud, + #[serde(rename = "AzureUSGovernmentCloud")] + AzureUsGovernmentCloud, + AzureChinaCloud, + AzureGermanCloud, + AzureStackCloud, +} + +/// AzureResourceTag is a tag to apply to Azure resources created for the cluster. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusAzureResourceTags { + /// key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. 
Key must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric characters and the following special characters `_ . -`. + pub key: String, + /// value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`. + pub value: String, +} + +/// BareMetal contains settings specific to the BareMetal platform. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusBaremetal { + /// apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + /// Deprecated: Use APIServerInternalIPs instead. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIP")] + pub api_server_internal_ip: Option, + /// apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIPs")] + pub api_server_internal_i_ps: Option>, + /// ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + /// Deprecated: Use IngressIPs instead. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIP")] + pub ingress_ip: Option, + /// ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIPs")] + pub ingress_i_ps: Option>, + /// machineNetworks are IP networks used to connect all the OpenShift cluster nodes. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "machineNetworks")] + pub machine_networks: Option>, + /// nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeDNSIP")] + pub node_dnsip: Option, +} + +/// EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusEquinixMetal { + /// apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIP")] + pub api_server_internal_ip: Option, + /// ingressIP is an external IP which routes to the default ingress controller. 
The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIP")] + pub ingress_ip: Option, +} + +/// External contains settings specific to the generic External infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusExternal { + /// cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). When omitted, new nodes will be not tainted and no extra initialization from the cloud controller manager is expected. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloudControllerManager")] + pub cloud_controller_manager: Option, +} + +/// cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). When omitted, new nodes will be not tainted and no extra initialization from the cloud controller manager is expected. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusExternalCloudControllerManager { + /// state determines whether or not an external Cloud Controller Manager is expected to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager + /// Valid values are "External", "None" and omitted. When set to "External", new nodes will be tainted as uninitialized when created, preventing them from running workloads until they are initialized by the cloud controller manager. When omitted or set to "None", new nodes will be not tainted and no extra initialization from the cloud controller manager is expected. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub state: Option, +} + +/// cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). 
When omitted, new nodes will be not tainted and no extra initialization from the cloud controller manager is expected. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InfrastructureStatusPlatformStatusExternalCloudControllerManagerState { + #[serde(rename = "")] + KopiumEmpty, + External, + None, +} + +/// GCP contains settings specific to the Google Cloud Platform infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusGcp { + /// resourceGroupName is the Project ID for new GCP resources created for the cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "projectID")] + pub project_id: Option, + /// region holds the region for new GCP resources created for the cluster. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub region: Option, +} + +/// IBMCloud contains settings specific to the IBMCloud infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusIbmcloud { + /// CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cisInstanceCRN")] + pub cis_instance_crn: Option, + /// DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain + #[serde(default, skip_serializing_if = "Option::is_none", rename = "dnsInstanceCRN")] + pub dns_instance_crn: Option, + /// Location is where the cluster has been deployed + #[serde(default, skip_serializing_if = "Option::is_none")] + pub location: Option, + /// ProviderType indicates the type of cluster that was created + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerType")] + pub provider_type: Option, + /// ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceGroupName")] + pub resource_group_name: Option, + /// serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM Cloud service. These endpoints are consumed by components within the cluster to reach the respective IBM Cloud Services. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceEndpoints")] + pub service_endpoints: Option>, +} + +/// IBMCloudServiceEndpoint stores the configuration of a custom url to override existing defaults of IBM Cloud Services. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusIbmcloudServiceEndpoints { + /// name is the name of the IBM Cloud service. Possible values are: CIS, COS, DNSServices, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com` + pub name: InfrastructureStatusPlatformStatusIbmcloudServiceEndpointsName, + /// url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + pub url: String, +} + +/// IBMCloudServiceEndpoint stores the configuration of a custom url to override existing defaults of IBM Cloud Services. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InfrastructureStatusPlatformStatusIbmcloudServiceEndpointsName { + #[serde(rename = "CIS")] + Cis, + #[serde(rename = "COS")] + Cos, + #[serde(rename = "DNSServices")] + DnsServices, + GlobalSearch, + GlobalTagging, + HyperProtect, + #[serde(rename = "IAM")] + Iam, + KeyProtect, + ResourceController, + ResourceManager, + #[serde(rename = "VPC")] + Vpc, +} + +/// Kubevirt contains settings specific to the kubevirt infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusKubevirt { + /// apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIP")] + pub api_server_internal_ip: Option, + /// ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIP")] + pub ingress_ip: Option, +} + +/// Nutanix contains settings specific to the Nutanix infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusNutanix { + /// apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. 
+ /// Deprecated: Use APIServerInternalIPs instead. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIP")] + pub api_server_internal_ip: Option, + /// apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIPs")] + pub api_server_internal_i_ps: Option>, + /// ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + /// Deprecated: Use IngressIPs instead. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIP")] + pub ingress_ip: Option, + /// ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIPs")] + pub ingress_i_ps: Option>, +} + +/// OpenStack contains settings specific to the OpenStack infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusOpenstack { + /// apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + /// Deprecated: Use APIServerInternalIPs instead. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIP")] + pub api_server_internal_ip: Option, + /// apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIPs")] + pub api_server_internal_i_ps: Option>, + /// cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloudName")] + pub cloud_name: Option, + /// ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + /// Deprecated: Use IngressIPs instead. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIP")] + pub ingress_ip: Option, + /// ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIPs")] + pub ingress_i_ps: Option>, + /// loadBalancer defines how the load balancer used by the cluster is configured. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "loadBalancer")] + pub load_balancer: Option, + /// machineNetworks are IP networks used to connect all the OpenShift cluster nodes. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "machineNetworks")] + pub machine_networks: Option>, + /// nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeDNSIP")] + pub node_dnsip: Option, +} + +/// loadBalancer defines how the load balancer used by the cluster is configured. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusOpenstackLoadBalancer { + /// type defines the type of load balancer used by the cluster on OpenStack platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// loadBalancer defines how the load balancer used by the cluster is configured. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InfrastructureStatusPlatformStatusOpenstackLoadBalancerType { + OpenShiftManagedDefault, + UserManaged, +} + +/// Ovirt contains settings specific to the oVirt infrastructure provider. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusOvirt { + /// apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + /// Deprecated: Use APIServerInternalIPs instead. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIP")] + pub api_server_internal_ip: Option, + /// apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIPs")] + pub api_server_internal_i_ps: Option>, + /// ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + /// Deprecated: Use IngressIPs instead. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIP")] + pub ingress_ip: Option, + /// ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIPs")] + pub ingress_i_ps: Option>, + /// deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeDNSIP")] + pub node_dnsip: Option, +} + +/// PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusPowervs { + /// CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cisInstanceCRN")] + pub cis_instance_crn: Option, + /// DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain + #[serde(default, skip_serializing_if = "Option::is_none", rename = "dnsInstanceCRN")] + pub dns_instance_crn: Option, + /// region holds the default Power VS region for new Power VS resources created by the cluster. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub region: Option, + /// resourceGroup is the resource group name for new IBMCloud resources created for a cluster. The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. When omitted, the image registry operator won't be able to configure storage, which results in the image registry cluster operator not being in an available state. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceGroup")] + pub resource_group: Option, + /// serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceEndpoints")] + pub service_endpoints: Option>, + /// zone holds the default zone for the new Power VS resources created by the cluster. 
Note: Currently only single-zone OCP clusters are supported + #[serde(default, skip_serializing_if = "Option::is_none")] + pub zone: Option, +} + +/// PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusPowervsServiceEndpoints { + /// name is the name of the Power VS service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + pub name: String, + /// url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + pub url: String, +} + +/// platformStatus holds status information specific to the underlying infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InfrastructureStatusPlatformStatusType { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "AWS")] + Aws, + Azure, + BareMetal, + #[serde(rename = "GCP")] + Gcp, + Libvirt, + OpenStack, + None, + VSphere, + #[serde(rename = "oVirt")] + OVirt, + #[serde(rename = "IBMCloud")] + IbmCloud, + KubeVirt, + EquinixMetal, + #[serde(rename = "PowerVS")] + PowerVs, + AlibabaCloud, + Nutanix, + External, +} + +/// VSphere contains settings specific to the VSphere infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InfrastructureStatusPlatformStatusVsphere { + /// apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. 
It is the IP for a self-hosted load balancer in front of the API servers. + /// Deprecated: Use APIServerInternalIPs instead. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIP")] + pub api_server_internal_ip: Option, + /// apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerInternalIPs")] + pub api_server_internal_i_ps: Option>, + /// ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + /// Deprecated: Use IngressIPs instead. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIP")] + pub ingress_ip: Option, + /// ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressIPs")] + pub ingress_i_ps: Option>, + /// machineNetworks are IP networks used to connect all the OpenShift cluster nodes. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "machineNetworks")] + pub machine_networks: Option>, + /// nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. 
In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeDNSIP")] + pub node_dnsip: Option, +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/ingresses.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/ingresses.rs new file mode 100644 index 000000000..89d781f34 --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/ingresses.rs @@ -0,0 +1,295 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/ingresses.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "Ingress", plural = "ingresses")] +#[kube(status = "IngressStatus")] +#[kube(schema = "disabled")] +pub struct IngressSpec { + /// appsDomain is an optional domain to use instead of the one specified in the domain field when a Route is created without specifying an explicit host. If appsDomain is nonempty, this value is used to generate default host values for Route. Unlike domain, appsDomain may be modified after installation. This assumes a new ingresscontroller has been setup with a wildcard certificate. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "appsDomain")] + pub apps_domain: Option, + /// componentRoutes is an optional list of routes that are managed by OpenShift components that a cluster-admin is able to configure the hostname and serving certificate for. 
The namespace and name of each route in this list should match an existing entry in the status.componentRoutes list. + /// To determine the set of configurable Routes, look at namespace and name of entries in the .status.componentRoutes list, where participating operators write the status of configurable routes. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "componentRoutes")] + pub component_routes: Option>, + /// domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: "..". + /// It is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: "*.". + /// Once set, changing domain is not currently supported. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub domain: Option, + /// loadBalancer contains the load balancer details in general which are not only specific to the underlying infrastructure provider of the current cluster and are required for Ingress Controller to work on OpenShift. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "loadBalancer")] + pub load_balancer: Option, + /// requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes matching the domainPattern/s and namespaceSelector/s that are specified in the policy. Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route annotation, and affect route admission. + /// A candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation: "haproxy.router.openshift.io/hsts_header" E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains + /// - For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector, then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. 
Otherwise, the route is rejected. - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies determines the route's admission status. - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector, then it may use any HSTS Policy annotation. + /// The HSTS policy configuration may be changed after routes have already been created. An update to a previously admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration. However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working. + /// Note that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredHSTSPolicies")] + pub required_hsts_policies: Option>, +} + +/// ComponentRouteSpec allows for configuration of a route's hostname and serving certificate. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressComponentRoutes { + /// hostname is the hostname that should be used by the route. + pub hostname: String, + /// name is the logical name of the route to customize. + /// The namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized. + pub name: String, + /// namespace is the namespace of the route to customize. + /// The namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized. + pub namespace: String, + /// servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. 
If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "servingCertKeyPairSecret")] + pub serving_cert_key_pair_secret: Option, +} + +/// servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressComponentRoutesServingCertKeyPairSecret { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// loadBalancer contains the load balancer details in general which are not only specific to the underlying infrastructure provider of the current cluster and are required for Ingress Controller to work on OpenShift. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressLoadBalancer { + /// platform holds configuration specific to the underlying infrastructure provider for the ingress load balancers. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub platform: Option, +} + +/// platform holds configuration specific to the underlying infrastructure provider for the ingress load balancers. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressLoadBalancerPlatform { + /// aws contains settings specific to the Amazon Web Services infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub aws: Option, + /// type is the underlying infrastructure provider for the cluster. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// aws contains settings specific to the Amazon Web Services infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressLoadBalancerPlatformAws { + /// type allows user to set a load balancer type. When this field is set the default ingresscontroller will get created using the specified LBType. If this field is not set then the default ingress controller of LBType Classic will be created. Valid values are: + /// * "Classic": A Classic Load Balancer that makes routing decisions at either the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See the following for additional details: + /// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb + /// * "NLB": A Network Load Balancer that makes routing decisions at the transport layer (TCP/SSL). See the following for additional details: + /// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb + #[serde(rename = "type")] + pub r#type: IngressLoadBalancerPlatformAwsType, +} + +/// aws contains settings specific to the Amazon Web Services infrastructure provider. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressLoadBalancerPlatformAwsType { + #[serde(rename = "NLB")] + Nlb, + Classic, +} + +/// platform holds configuration specific to the underlying infrastructure provider for the ingress load balancers. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressLoadBalancerPlatformType { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "AWS")] + Aws, + Azure, + BareMetal, + #[serde(rename = "GCP")] + Gcp, + Libvirt, + OpenStack, + None, + VSphere, + #[serde(rename = "oVirt")] + OVirt, + #[serde(rename = "IBMCloud")] + IbmCloud, + KubeVirt, + EquinixMetal, + #[serde(rename = "PowerVS")] + PowerVs, + AlibabaCloud, + Nutanix, + External, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressRequiredHstsPolicies { + /// domainPatterns is a list of domains for which the desired HSTS annotations are required. If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy. + /// The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*. + #[serde(rename = "domainPatterns")] + pub domain_patterns: Vec, + /// includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host's domain name. 
Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com + #[serde(default, skip_serializing_if = "Option::is_none", rename = "includeSubDomainsPolicy")] + pub include_sub_domains_policy: Option, + /// maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. If set to 0, it negates the effect, and hosts are removed as HSTS hosts. If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS policy will eventually expire on that client. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxAge")] + pub max_age: Option, + /// namespaceSelector specifies a label selector such that the policy applies only to those routes that are in namespaces with labels that match the selector, and are in one of the DomainPatterns. Defaults to the empty LabelSelector, which matches everything. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + /// preloadPolicy directs the client to include hosts in its host preload list so that it never needs to do an initial load to get the HSTS header (note that this is not defined in RFC 6797 and is therefore client implementation-dependent). 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "preloadPolicy")] + pub preload_policy: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressRequiredHstsPoliciesIncludeSubDomainsPolicy { + RequireIncludeSubDomains, + RequireNoIncludeSubDomains, + NoOpinion, +} + +/// maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. If set to 0, it negates the effect, and hosts are removed as HSTS hosts. If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS policy will eventually expire on that client. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressRequiredHstsPoliciesMaxAge { + /// The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age This value can be left unspecified, in which case no upper limit is enforced. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "largestMaxAge")] + pub largest_max_age: Option, + /// The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary tool for administrators to quickly correct mistakes. This value can be left unspecified, in which case no lower limit is enforced. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "smallestMaxAge")] + pub smallest_max_age: Option, +} + +/// namespaceSelector specifies a label selector such that the policy applies only to those routes that are in namespaces with labels that match the selector, and are in one of the DomainPatterns. Defaults to the empty LabelSelector, which matches everything. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressRequiredHstsPoliciesNamespaceSelector { + /// matchExpressions is a list of label selector requirements. 
The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressRequiredHstsPoliciesNamespaceSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressRequiredHstsPoliciesPreloadPolicy { + RequirePreload, + RequireNoPreload, + NoOpinion, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressStatus { + /// componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "componentRoutes")] + pub component_routes: Option>, + /// defaultPlacement is set at installation time to control which nodes will host the ingress router pods by default. The options are control-plane nodes or worker nodes. + /// This field works by dictating how the Cluster Ingress Operator will consider unset replicas and nodePlacement fields in IngressController resources when creating the corresponding Deployments. + /// See the documentation for the IngressController replicas and nodePlacement fields for more information. + /// When omitted, the default value is Workers + #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultPlacement")] + pub default_placement: Option, +} + +/// ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressStatusComponentRoutes { + /// conditions are used to communicate the state of the componentRoutes entry. + /// Supported conditions include Available, Degraded and Progressing. + /// If available is true, the content served by the route can be accessed by users. This includes cases where a default may continue to serve content while the customized route specified by the cluster-admin is being configured. + /// If Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. The currentHostnames field may or may not be in effect. + /// If Progressing is true, that means the component is taking some action related to the componentRoutes entry. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "consumingUsers")] + pub consuming_users: Option>, + /// currentHostnames is the list of current names used by the route. Typically, this list should consist of a single hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "currentHostnames")] + pub current_hostnames: Option>, + /// defaultHostname is the hostname of this route prior to customization. + #[serde(rename = "defaultHostname")] + pub default_hostname: String, + /// name is the logical name of the route to customize. It does not have to be the actual name of a route resource but it cannot be renamed. + /// The namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized. + pub name: String, + /// namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace ensures that no two components will conflict and the same component can be installed multiple times. + /// The namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized. + pub namespace: String, + /// relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied. + #[serde(rename = "relatedObjects")] + pub related_objects: Vec, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressStatusComponentRoutesConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. + pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + pub reason: String, + /// status of the condition, one of True, False, Unknown. + pub status: IngressStatusComponentRoutesConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressStatusComponentRoutesConditionsStatus { + True, + False, + Unknown, +} + +/// ObjectReference contains enough information to let you inspect or modify the referred object. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressStatusComponentRoutesRelatedObjects { + /// group of the referent. + pub group: String, + /// name of the referent. + pub name: String, + /// namespace of the referent. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource of the referent. + pub resource: String, +} + +/// status holds observed values from the cluster. They may not be overridden. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressStatusDefaultPlacement { + ControlPlane, + Workers, + #[serde(rename = "")] + KopiumEmpty, +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/mod.rs new file mode 100644 index 000000000..1d8e963b1 --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/mod.rs @@ -0,0 +1,19 @@ +pub mod apiservers; +pub mod authentications; +pub mod clusteroperators; +pub mod clusterversions; +pub mod consoles; +pub mod dnses; +pub mod featuregates; +pub mod imagedigestmirrorsets; +pub mod images; +pub mod imagetagmirrorsets; +pub mod infrastructures; +pub mod ingresses; +pub mod networks; +pub mod nodes; +pub mod oauths; +pub mod operatorhubs; +pub mod projects; +pub mod proxies; +pub mod schedulers; diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/networks.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/networks.rs new file mode 100644 index 000000000..1d2e91e53 --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/networks.rs @@ -0,0 +1,146 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/networks.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. 
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "Network", plural = "networks")] +#[kube(schema = "disabled")] +pub struct NetworkSpec { + /// IP address pool to use for pod IPs. This field is immutable after installation. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clusterNetwork")] + pub cluster_network: Option>, + /// externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "externalIP")] + pub external_ip: Option, + /// NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkType")] + pub network_type: Option, + /// IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceNetwork")] + pub service_network: Option>, + /// The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceNodePortRange")] + pub service_node_port_range: Option, +} + +/// ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkClusterNetwork { + /// The complete block for pod IPs. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub cidr: Option, + /// The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostPrefix")] + pub host_prefix: Option, +} + +/// externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkExternalIp { + /// autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called "IngressIPs". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "autoAssignCIDRs")] + pub auto_assign_cid_rs: Option>, + /// policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub policy: Option, +} + +/// policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkExternalIpPolicy { + /// allowedCIDRs is the list of allowed CIDRs. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowedCIDRs")] + pub allowed_cid_rs: Option>, + /// rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "rejectedCIDRs")] + pub rejected_cid_rs: Option>, +} + +/// status holds observed values from the cluster. They may not be overridden. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkStatus { + /// IP address pool to use for pod IPs. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clusterNetwork")] + pub cluster_network: Option>, + /// ClusterNetworkMTU is the MTU for inter-pod networking. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clusterNetworkMTU")] + pub cluster_network_mtu: Option, + /// Migration contains the cluster network migration configuration. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub migration: Option, + /// NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkType")] + pub network_type: Option, + /// IP address pool for services. Currently, we only support a single entry here. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceNetwork")] + pub service_network: Option>, +} + +/// ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkStatusClusterNetwork { + /// The complete block for pod IPs. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub cidr: Option, + /// The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostPrefix")] + pub host_prefix: Option, +} + +/// Migration contains the cluster network migration configuration. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkStatusMigration { + /// MTU contains the MTU migration configuration. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mtu: Option, + /// NetworkType is the target plugin that is to be deployed. 
Currently supported values are: OpenShiftSDN, OVNKubernetes + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkType")] + pub network_type: Option, +} + +/// MTU contains the MTU migration configuration. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkStatusMigrationMtu { + /// Machine contains MTU migration configuration for the machine's uplink. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub machine: Option, + /// Network contains MTU migration configuration for the default network. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub network: Option, +} + +/// Machine contains MTU migration configuration for the machine's uplink. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkStatusMigrationMtuMachine { + /// From is the MTU to migrate from. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub from: Option, + /// To is the MTU to migrate to. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub to: Option, +} + +/// Network contains MTU migration configuration for the default network. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkStatusMigrationMtuNetwork { + /// From is the MTU to migrate from. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub from: Option, + /// To is the MTU to migrate to. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub to: Option, +} + +/// Migration contains the cluster network migration configuration. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum NetworkStatusMigrationNetworkType { + #[serde(rename = "OpenShiftSDN")] + OpenShiftSdn, + #[serde(rename = "OVNKubernetes")] + OvnKubernetes, +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/nodes.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/nodes.rs new file mode 100644 index 000000000..8082835b1 --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/nodes.rs @@ -0,0 +1,45 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/nodes.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "Node", plural = "nodes")] +#[kube(status = "NodeStatus")] +#[kube(schema = "disabled")] +pub struct NodeSpec { + /// CgroupMode determines the cgroups version on the node + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cgroupMode")] + pub cgroup_mode: Option, + /// WorkerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster + #[serde(default, skip_serializing_if = "Option::is_none", rename = "workerLatencyProfile")] + pub worker_latency_profile: Option, +} + +/// spec holds user settable values for configuration +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum NodeCgroupMode { + #[serde(rename = "v1")] + V1, + #[serde(rename = "v2")] + V2, + #[serde(rename = "")] + KopiumEmpty, +} + +/// spec holds user settable values for configuration +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum NodeWorkerLatencyProfile { + Default, + 
MediumUpdateAverageReaction, + LowUpdateSlowReaction, +} + +/// status holds observed values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NodeStatus { +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/oauths.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/oauths.rs new file mode 100644 index 000000000..bcc74cb85 --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/oauths.rs @@ -0,0 +1,451 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/oauths.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "OAuth", plural = "oauths")] +#[kube(status = "OAuthStatus")] +#[kube(schema = "disabled")] +pub struct OAuthSpec { + /// identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "identityProviders")] + pub identity_providers: Option>, + /// templates allow you to customize pages like the login page. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub templates: Option, + /// tokenConfig contains options for authorization and access tokens + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tokenConfig")] + pub token_config: Option, +} + +/// IdentityProvider provides identities for users authenticating using credentials +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProviders { + /// basicAuth contains configuration options for the BasicAuth IdP + #[serde(default, skip_serializing_if = "Option::is_none", rename = "basicAuth")] + pub basic_auth: Option, + /// github enables user authentication using GitHub credentials + #[serde(default, skip_serializing_if = "Option::is_none")] + pub github: Option, + /// gitlab enables user authentication using GitLab credentials + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gitlab: Option, + /// google enables user authentication using Google credentials + #[serde(default, skip_serializing_if = "Option::is_none")] + pub google: Option, + /// htpasswd enables user authentication using an HTPasswd file to validate credentials + #[serde(default, skip_serializing_if = "Option::is_none")] + pub htpasswd: Option, + /// keystone enables user authentication using keystone password credentials + #[serde(default, skip_serializing_if = "Option::is_none")] + pub keystone: Option, + /// ldap enables user authentication using LDAP credentials + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ldap: Option, + /// mappingMethod determines how identities from this provider are mapped to users Defaults to "claim" + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mappingMethod")] + pub mapping_method: Option, + /// name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal "." 
or ".." or contain "/" or "%" or ":" Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// openID enables user authentication using OpenID credentials + #[serde(default, skip_serializing_if = "Option::is_none", rename = "openID")] + pub open_id: Option, + /// requestHeader enables user authentication using request header credentials + #[serde(default, skip_serializing_if = "Option::is_none", rename = "requestHeader")] + pub request_header: Option, + /// type identifies the identity provider type for this entry. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// basicAuth contains configuration options for the BasicAuth IdP +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersBasicAuth { + /// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ca: Option, + /// tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key "tls.crt" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "tlsClientCert")] + pub tls_client_cert: Option, + /// tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key "tls.key" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tlsClientKey")] + pub tls_client_key: Option, + /// url is the remote URL to connect to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub url: Option, +} + +/// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersBasicAuthCa { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key "tls.crt" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersBasicAuthTlsClientCert { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key "tls.key" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersBasicAuthTlsClientKey { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// github enables user authentication using GitHub credentials +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersGithub { + /// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ca: Option, + /// clientID is the oauth client ID + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientID")] + pub client_id: Option, + /// clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. 
If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientSecret")] + pub client_secret: Option, + /// hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hostname: Option, + /// organizations optionally restricts which organizations are allowed to log in + #[serde(default, skip_serializing_if = "Option::is_none")] + pub organizations: Option>, + /// teams optionally restricts which teams are allowed to log in. Format is /. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub teams: Option>, +} + +/// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersGithubCa { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersGithubClientSecret { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// gitlab enables user authentication using GitLab credentials +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersGitlab { + /// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ca: Option, + /// clientID is the oauth client ID + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientID")] + pub client_id: Option, + /// clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientSecret")] + pub client_secret: Option, + /// url is the oauth server base URL + #[serde(default, skip_serializing_if = "Option::is_none")] + pub url: Option, +} + +/// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. 
If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersGitlabCa { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersGitlabClientSecret { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// google enables user authentication using Google credentials +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersGoogle { + /// clientID is the oauth client ID + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientID")] + pub client_id: Option, + /// clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientSecret")] + pub client_secret: Option, + /// hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostedDomain")] + pub hosted_domain: Option, +} + +/// clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. 
If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersGoogleClientSecret { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// htpasswd enables user authentication using an HTPasswd file to validate credentials +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersHtpasswd { + /// fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key "htpasswd" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fileData")] + pub file_data: Option, +} + +/// fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key "htpasswd" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersHtpasswdFileData { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// keystone enables user authentication using keystone password credentials +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersKeystone { + /// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. 
The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ca: Option, + /// domainName is required for keystone v3 + #[serde(default, skip_serializing_if = "Option::is_none", rename = "domainName")] + pub domain_name: Option, + /// tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key "tls.crt" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tlsClientCert")] + pub tls_client_cert: Option, + /// tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key "tls.key" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tlsClientKey")] + pub tls_client_key: Option, + /// url is the remote URL to connect to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub url: Option, +} + +/// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. 
The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersKeystoneCa { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key "tls.crt" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersKeystoneTlsClientCert { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key "tls.key" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersKeystoneTlsClientKey { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// ldap enables user authentication using LDAP credentials +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersLdap { + /// attributes maps LDAP attributes to identities + #[serde(default, skip_serializing_if = "Option::is_none")] + pub attributes: Option, + /// bindDN is an optional DN to bind with during the search phase. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "bindDN")] + pub bind_dn: Option, + /// bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key "bindPassword" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "bindPassword")] + pub bind_password: Option, + /// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ca: Option, + /// insecure, if true, indicates the connection should not use TLS WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always attempt to connect using TLS, even when `insecure` is set to `true` When `true`, "ldap://" URLS connect insecurely. 
When `false`, "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub insecure: Option, + /// url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter + #[serde(default, skip_serializing_if = "Option::is_none")] + pub url: Option, +} + +/// attributes maps LDAP attributes to identities +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersLdapAttributes { + /// email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity + #[serde(default, skip_serializing_if = "Option::is_none")] + pub email: Option>, + /// id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attribute have a value, authentication fails. LDAP standard identity attribute is "dn" + #[serde(default, skip_serializing_if = "Option::is_none")] + pub id: Option>, + /// name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is "cn" + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option>, + /// preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is "uid" + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredUsername")] + pub preferred_username: Option>, +} + +/// bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key "bindPassword" is used to locate the data. 
If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersLdapBindPassword { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersLdapCa { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// openID enables user authentication using OpenID credentials +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersOpenId { + /// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub ca: Option, + /// claims mappings + #[serde(default, skip_serializing_if = "Option::is_none")] + pub claims: Option, + /// clientID is the oauth client ID + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientID")] + pub client_id: Option, + /// clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientSecret")] + pub client_secret: Option, + /// extraAuthorizeParameters are any custom parameters to add to the authorize request. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "extraAuthorizeParameters")] + pub extra_authorize_parameters: Option>, + /// extraScopes are any scopes to request in addition to the standard "openid" scope. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "extraScopes")] + pub extra_scopes: Option>, + /// issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub issuer: Option, +} + +/// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersOpenIdCa { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// claims mappings +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersOpenIdClaims { + /// email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity + #[serde(default, skip_serializing_if = "Option::is_none")] + pub email: Option>, + /// groups is the list of claims value of which should be used to synchronize groups from the OIDC provider to OpenShift for the user. If multiple claims are specified, the first one with a non-empty value is used. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub groups: Option>, + /// name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option>, + /// preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the sub claim + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredUsername")] + pub preferred_username: Option>, +} + +/// clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersOpenIdClientSecret { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// requestHeader enables user authentication using request header credentials +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersRequestHeader { + /// ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key "ca.crt" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ca: Option, + /// challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} Required when challenge is set to true. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "challengeURL")] + pub challenge_url: Option, + /// clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientCommonNames")] + pub client_common_names: Option>, + /// emailHeaders is the set of headers to check for the email address + #[serde(default, skip_serializing_if = "Option::is_none", rename = "emailHeaders")] + pub email_headers: Option>, + /// headers is the set of headers to check for identity information + #[serde(default, skip_serializing_if = "Option::is_none")] + pub headers: Option>, + /// loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} Required when login is set to true. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "loginURL")] + pub login_url: Option, + /// nameHeaders is the set of headers to check for the display name + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nameHeaders")] + pub name_headers: Option>, + /// preferredUsernameHeaders is the set of headers to check for the preferred username + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredUsernameHeaders")] + pub preferred_username_headers: Option>, +} + +/// ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key "ca.crt" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthIdentityProvidersRequestHeaderCa { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// templates allow you to customize pages like the login page. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthTemplates { + /// error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key "errors.html" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub error: Option, + /// login is the name of a secret that specifies a go template to use to render the login page. The key "login.html" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub login: Option, + /// providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key "providers.html" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerSelection")] + pub provider_selection: Option, +} + +/// error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key "errors.html" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthTemplatesError { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// login is the name of a secret that specifies a go template to use to render the login page. The key "login.html" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthTemplatesLogin { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key "providers.html" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthTemplatesProviderSelection { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// tokenConfig contains options for authorization and access tokens +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthTokenConfig { + /// accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as "5m", "1.5h" or "2h45m". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime. + /// WARNING: existing tokens' timeout will not be affected (lowered) by changing this value + #[serde(default, skip_serializing_if = "Option::is_none", rename = "accessTokenInactivityTimeout")] + pub access_token_inactivity_timeout: Option, + /// accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "accessTokenInactivityTimeoutSeconds")] + pub access_token_inactivity_timeout_seconds: Option, + /// accessTokenMaxAgeSeconds defines the maximum age of access tokens + #[serde(default, skip_serializing_if = "Option::is_none", rename = "accessTokenMaxAgeSeconds")] + pub access_token_max_age_seconds: Option, +} + +/// status holds observed values from the cluster. They may not be overridden. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OAuthStatus { +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/operatorhubs.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/operatorhubs.rs new file mode 100644 index 000000000..774e7f33a --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/operatorhubs.rs @@ -0,0 +1,57 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/operatorhubs.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// OperatorHubSpec defines the desired state of OperatorHub +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "OperatorHub", plural = "operatorhubs")] +#[kube(status = "OperatorHubStatus")] +#[kube(schema = "disabled")] +pub struct OperatorHubSpec { + /// disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "disableAllDefaultSources")] + pub disable_all_default_sources: Option, + /// sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub sources: Option>, +} + +/// HubSource is used to specify the hub source and its configuration +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OperatorHubSources { + /// disabled is used to disable a default hub source on cluster + #[serde(default, skip_serializing_if = "Option::is_none")] + pub disabled: Option, + /// name is the name of one of the default hub sources + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +/// OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OperatorHubStatus { + /// sources encapsulates the result of applying the configuration for each hub source + #[serde(default, skip_serializing_if = "Option::is_none")] + pub sources: Option>, +} + +/// HubSourceStatus is used to reflect the current state of applying the configuration to a default source +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OperatorHubStatusSources { + /// disabled is used to disable a default hub source on cluster + #[serde(default, skip_serializing_if = "Option::is_none")] + pub disabled: Option, + /// message provides more information regarding failures + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// name is the name of one of the default hub sources + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// status indicates success or failure in applying the configuration + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/projects.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/projects.rs new file mode 100644 index 000000000..b5e28e93a --- /dev/null +++ 
b/kube-custom-resources-rs/src/config_openshift_io/v1/projects.rs @@ -0,0 +1,34 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/projects.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "Project", plural = "projects")] +#[kube(status = "ProjectStatus")] +#[kube(schema = "disabled")] +pub struct ProjectSpec { + /// projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint + #[serde(default, skip_serializing_if = "Option::is_none", rename = "projectRequestMessage")] + pub project_request_message: Option, + /// projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "projectRequestTemplate")] + pub project_request_template: Option, +} + +/// projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ProjectProjectRequestTemplate { + /// name is the metadata.name of the referenced project request template + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +/// status holds observed values from the cluster. They may not be overridden. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ProjectStatus { +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/proxies.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/proxies.rs new file mode 100644 index 000000000..7f0b79865 --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/proxies.rs @@ -0,0 +1,55 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/proxies.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// Spec holds user-settable values for the proxy configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "Proxy", plural = "proxies")] +#[kube(status = "ProxyStatus")] +#[kube(schema = "disabled")] +pub struct ProxySpec { + /// httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpProxy")] + pub http_proxy: Option, + /// httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpsProxy")] + pub https_proxy: Option, + /// noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "noProxy")] + pub no_proxy: Option, + /// readinessEndpoints is a list of endpoints used to verify readiness of the proxy. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "readinessEndpoints")] + pub readiness_endpoints: Option>, + /// trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key "ca-bundle.crt", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named "trusted-ca-bundle" in the "openshift-config-managed" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well. + /// The namespace for the ConfigMap referenced by trustedCA is "openshift-config". Here is an example ConfigMap (in yaml): + /// apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate bundle. -----END CERTIFICATE----- + #[serde(default, skip_serializing_if = "Option::is_none", rename = "trustedCA")] + pub trusted_ca: Option, +} + +/// trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key "ca-bundle.crt", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named "trusted-ca-bundle" in the "openshift-config-managed" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well. +/// The namespace for the ConfigMap referenced by trustedCA is "openshift-config". 
Here is an example ConfigMap (in yaml): +/// apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate bundle. -----END CERTIFICATE----- +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ProxyTrustedCa { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ProxyStatus { + /// httpProxy is the URL of the proxy for HTTP requests. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpProxy")] + pub http_proxy: Option, + /// httpsProxy is the URL of the proxy for HTTPS requests. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpsProxy")] + pub https_proxy: Option, + /// noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "noProxy")] + pub no_proxy: Option, +} + diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/schedulers.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/schedulers.rs new file mode 100644 index 000000000..fb90ab230 --- /dev/null +++ b/kube-custom-resources-rs/src/config_openshift_io/v1/schedulers.rs @@ -0,0 +1,50 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/config.openshift.io/v1/schedulers.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "config.openshift.io", version = "v1", kind = "Scheduler", plural = "schedulers")] +#[kube(status = "SchedulerStatus")] +#[kube(schema = "disabled")] +pub struct SchedulerSpec { + /// defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod's selector. For example, defaultNodeSelector: "type=user-node,region=east" would set nodeSelector field in pod spec to "type=user-node,region=east" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. 
When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: "type=user-node,region=west" means that the default of "type=user-node,region=east" set in defaultNodeSelector would not be applied. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultNodeSelector")] + pub default_node_selector: Option, + /// MastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mastersSchedulable")] + pub masters_schedulable: Option, + /// DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub policy: Option, + /// profile sets which scheduling profile should be set in order to configure scheduling decisions for new pods. + /// Valid values are "LowNodeUtilization", "HighNodeUtilization", "NoScoring" Defaults to "LowNodeUtilization" + #[serde(default, skip_serializing_if = "Option::is_none")] + pub profile: Option, +} + +/// DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. 
policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct SchedulerPolicy { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// spec holds user settable values for configuration +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum SchedulerProfile { + #[serde(rename = "")] + KopiumEmpty, + LowNodeUtilization, + HighNodeUtilization, + NoScoring, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct SchedulerStatus { +} + diff --git a/kube-custom-resources-rs/src/console_openshift_io/mod.rs b/kube-custom-resources-rs/src/console_openshift_io/mod.rs new file mode 100644 index 000000000..21c0fbff9 --- /dev/null +++ b/kube-custom-resources-rs/src/console_openshift_io/mod.rs @@ -0,0 +1,2 @@ +pub mod v1; +pub mod v1alpha1; diff --git a/kube-custom-resources-rs/src/console_openshift_io/v1/consoleplugins.rs b/kube-custom-resources-rs/src/console_openshift_io/v1/consoleplugins.rs new file mode 100644 index 000000000..aec956d60 --- /dev/null +++ b/kube-custom-resources-rs/src/console_openshift_io/v1/consoleplugins.rs @@ -0,0 +1,128 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/console.openshift.io/v1/consoleplugins.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// ConsolePluginSpec is the desired plugin configuration. 
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "console.openshift.io", version = "v1", kind = "ConsolePlugin", plural = "consoleplugins")] +#[kube(schema = "disabled")] +pub struct ConsolePluginSpec { + /// backend holds the configuration of backend which is serving console's plugin . + pub backend: ConsolePluginBackend, + /// displayName is the display name of the plugin. The dispalyName should be between 1 and 128 characters. + #[serde(rename = "displayName")] + pub display_name: String, + /// i18n is the configuration of plugin's localization resources. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub i18n: Option, + /// proxy is a list of proxies that describe various service type to which the plugin needs to connect to. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub proxy: Option>, +} + +/// backend holds the configuration of backend which is serving console's plugin . +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsolePluginBackend { + /// service is a Kubernetes Service that exposes the plugin using a deployment with an HTTP server. The Service must use HTTPS and Service serving certificate. The console backend will proxy the plugins assets from the Service using the service CA bundle. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub service: Option, + /// type is the backend type which servers the console's plugin. Currently only "Service" is supported. + /// --- + #[serde(rename = "type")] + pub r#type: ConsolePluginBackendType, +} + +/// service is a Kubernetes Service that exposes the plugin using a deployment with an HTTP server. The Service must use HTTPS and Service serving certificate. The console backend will proxy the plugins assets from the Service using the service CA bundle. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsolePluginBackendService { + /// basePath is the path to the plugin's assets. The primary asset it the manifest file called `plugin-manifest.json`, which is a JSON document that contains metadata about the plugin and the extensions. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "basePath")] + pub base_path: Option, + /// name of Service that is serving the plugin assets. + pub name: String, + /// namespace of Service that is serving the plugin assets. + pub namespace: String, + /// port on which the Service that is serving the plugin is listening to. + pub port: i32, +} + +/// backend holds the configuration of backend which is serving console's plugin . +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConsolePluginBackendType { + Service, +} + +/// i18n is the configuration of plugin's localization resources. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsolePluginI18n { + /// loadType indicates how the plugin's localization resource should be loaded. Valid values are Preload, Lazy and the empty string. When set to Preload, all localization resources are fetched when the plugin is loaded. When set to Lazy, localization resources are lazily loaded as and when they are required by the console. When omitted or set to the empty string, the behaviour is equivalent to Lazy type. + #[serde(rename = "loadType")] + pub load_type: ConsolePluginI18nLoadType, +} + +/// i18n is the configuration of plugin's localization resources. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConsolePluginI18nLoadType { + Preload, + Lazy, + #[serde(rename = "")] + KopiumEmpty, +} + +/// ConsolePluginProxy holds information on various service types to which console's backend will proxy the plugin's requests. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsolePluginProxy { + /// alias is a proxy name that identifies the plugin's proxy. An alias name should be unique per plugin. The console backend exposes following proxy endpoint: + /// /api/proxy/plugin///? + /// Request example path: + /// /api/proxy/plugin/acm/search/pods?namespace=openshift-apiserver + pub alias: String, + /// authorization provides information about authorization type, which the proxied request should contain + #[serde(default, skip_serializing_if = "Option::is_none")] + pub authorization: Option, + /// caCertificate provides the cert authority certificate contents, in case the proxied Service is using custom service CA. By default, the service CA bundle provided by the service-ca operator is used. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "caCertificate")] + pub ca_certificate: Option, + /// endpoint provides information about endpoint to which the request is proxied to. + pub endpoint: ConsolePluginProxyEndpoint, +} + +/// ConsolePluginProxy holds information on various service types to which console's backend will proxy the plugin's requests. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConsolePluginProxyAuthorization { + UserToken, + None, +} + +/// endpoint provides information about endpoint to which the request is proxied to. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsolePluginProxyEndpoint { + /// service is an in-cluster Service that the plugin will connect to. The Service must use HTTPS. The console backend exposes an endpoint in order to proxy communication between the plugin and the Service. Note: service field is required for now, since currently only "Service" type is supported. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub service: Option, + /// type is the type of the console plugin's proxy. Currently only "Service" is supported. 
+ /// --- + #[serde(rename = "type")] + pub r#type: ConsolePluginProxyEndpointType, +} + +/// service is an in-cluster Service that the plugin will connect to. The Service must use HTTPS. The console backend exposes an endpoint in order to proxy communication between the plugin and the Service. Note: service field is required for now, since currently only "Service" type is supported. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsolePluginProxyEndpointService { + /// name of Service that the plugin needs to connect to. + pub name: String, + /// namespace of Service that the plugin needs to connect to + pub namespace: String, + /// port on which the Service that the plugin needs to connect to is listening on. + pub port: i32, +} + +/// endpoint provides information about endpoint to which the request is proxied to. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConsolePluginProxyEndpointType { + Service, +} + diff --git a/kube-custom-resources-rs/src/console_openshift_io/v1/consolequickstarts.rs b/kube-custom-resources-rs/src/console_openshift_io/v1/consolequickstarts.rs new file mode 100644 index 000000000..3ff701ba9 --- /dev/null +++ b/kube-custom-resources-rs/src/console_openshift_io/v1/consolequickstarts.rs @@ -0,0 +1,104 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/console.openshift.io/v1/consolequickstarts.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// ConsoleQuickStartSpec is the desired quick start configuration. 
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "console.openshift.io", version = "v1", kind = "ConsoleQuickStart", plural = "consolequickstarts")] +#[kube(schema = "disabled")] +pub struct ConsoleQuickStartSpec { + /// accessReviewResources contains a list of resources that the user's access will be reviewed against in order for the user to complete the Quick Start. The Quick Start will be hidden if any of the access reviews fail. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "accessReviewResources")] + pub access_review_resources: Option>, + /// conclusion sums up the Quick Start and suggests the possible next steps. (includes markdown) + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conclusion: Option, + /// description is the description of the Quick Start. (includes markdown) + pub description: String, + /// displayName is the display name of the Quick Start. + #[serde(rename = "displayName")] + pub display_name: String, + /// durationMinutes describes approximately how many minutes it will take to complete the Quick Start. + #[serde(rename = "durationMinutes")] + pub duration_minutes: i64, + /// icon is a base64 encoded image that will be displayed beside the Quick Start display name. The icon should be an vector image for easy scaling. The size of the icon should be 40x40. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub icon: Option, + /// introduction describes the purpose of the Quick Start. (includes markdown) + pub introduction: String, + /// nextQuickStart is a list of the following Quick Starts, suggested for the user to try. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nextQuickStart")] + pub next_quick_start: Option>, + /// prerequisites contains all prerequisites that need to be met before taking a Quick Start. 
(includes markdown) + #[serde(default, skip_serializing_if = "Option::is_none")] + pub prerequisites: Option>, + /// tags is a list of strings that describe the Quick Start. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tags: Option>, + /// tasks is the list of steps the user has to perform to complete the Quick Start. + pub tasks: Vec, +} + +/// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsoleQuickStartAccessReviewResources { + /// Group is the API Group of the Resource. "*" means all. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces "" (empty) is defaulted for LocalSubjectAccessReviews "" (empty) is empty for cluster-scoped resources "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Resource is one of the existing resource types. "*" means all. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, + /// Subresource is one of the existing resource types. "" means none. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub subresource: Option, + /// Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub verb: Option, + /// Version is the API Version of the Resource. "*" means all. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// ConsoleQuickStartTask is a single step in a Quick Start. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsoleQuickStartTasks { + /// description describes the steps needed to complete the task. (includes markdown) + pub description: String, + /// review contains instructions to validate the task is complete. The user will select 'Yes' or 'No'. using a radio button, which indicates whether the step was completed successfully. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub review: Option, + /// summary contains information about the passed step. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub summary: Option, + /// title describes the task and is displayed as a step heading. + pub title: String, +} + +/// review contains instructions to validate the task is complete. The user will select 'Yes' or 'No'. using a radio button, which indicates whether the step was completed successfully. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsoleQuickStartTasksReview { + /// failedTaskHelp contains suggestions for a failed task review and is shown at the end of task. (includes markdown) + #[serde(rename = "failedTaskHelp")] + pub failed_task_help: String, + /// instructions contains steps that user needs to take in order to validate his work after going through a task. (includes markdown) + pub instructions: String, +} + +/// summary contains information about the passed step. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsoleQuickStartTasksSummary { + /// failed briefly describes the unsuccessfully passed task. (includes markdown) + pub failed: String, + /// success describes the succesfully passed task. 
+ pub success: String, +} + diff --git a/kube-custom-resources-rs/src/console_openshift_io/v1/consolesamples.rs b/kube-custom-resources-rs/src/console_openshift_io/v1/consolesamples.rs new file mode 100644 index 000000000..88554fe0f --- /dev/null +++ b/kube-custom-resources-rs/src/console_openshift_io/v1/consolesamples.rs @@ -0,0 +1,123 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/console.openshift.io/v1/consolesamples.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec contains configuration for a console sample. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "console.openshift.io", version = "v1", kind = "ConsoleSample", plural = "consolesamples")] +#[kube(schema = "disabled")] +pub struct ConsoleSampleSpec { + /// abstract is a short introduction to the sample. + /// It is required and must be no more than 100 characters in length. + /// The abstract is shown on the sample card tile below the title and provider and is limited to three lines of content. + #[serde(rename = "abstract")] + pub r#abstract: String, + /// description is a long form explanation of the sample. + /// It is required and can have a maximum length of **4096** characters. + /// It is a README.md-like content for additional information, links, pre-conditions, and other instructions. It will be rendered as Markdown so that it can contain line breaks, links, and other simple formatting. + pub description: String, + /// icon is an optional base64 encoded image and shown beside the sample title. + /// The format must follow the data: URL format and can have a maximum size of **10 KB**. + /// data:[][;base64], + /// For example: + /// data:image;base64, plus the base64 encoded image. + /// Vector images can also be used. 
SVG icons must start with: + /// data:image/svg+xml;base64, plus the base64 encoded SVG image. + /// All sample catalog icons will be shown on a white background (also when the dark theme is used). The web console ensures that different aspect ratios work correctly. Currently, the surface of the icon is at most 40x100px. + /// For more information on the data URL format, please visit https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub icon: Option, + /// provider is an optional label to honor who provides the sample. + /// It is optional and must be no more than 50 characters in length. + /// A provider can be a company like "Red Hat" or an organization like "CNCF" or "Knative". + /// Currently, the provider is only shown on the sample card tile below the title with the prefix "Provided by " + #[serde(default, skip_serializing_if = "Option::is_none")] + pub provider: Option, + /// source defines where to deploy the sample service from. The sample may be sourced from an external git repository or container image. + pub source: ConsoleSampleSource, + /// tags are optional string values that can be used to find samples in the samples catalog. + /// Examples of common tags may be "Java", "Quarkus", etc. + /// They will be displayed on the samples details page. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tags: Option>, + /// title is the display name of the sample. + /// It is required and must be no more than 50 characters in length. + pub title: String, + /// type is an optional label to group multiple samples. + /// It is optional and must be no more than 20 characters in length. + /// Recommendation is a singular term like "Builder Image", "Devfile" or "Serverless Function". + /// Currently, the type is shown a badge on the sample card tile in the top right corner. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// source defines where to deploy the sample service from. The sample may be sourced from an external git repository or container image. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsoleSampleSource { + /// containerImport allows the user import a container image. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "containerImport")] + pub container_import: Option, + /// gitImport allows the user to import code from a git repository. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "gitImport")] + pub git_import: Option, + /// type of the sample, currently supported: "GitImport";"ContainerImport" + #[serde(rename = "type")] + pub r#type: String, +} + +/// containerImport allows the user import a container image. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsoleSampleSourceContainerImport { + /// reference to a container image that provides a HTTP service. The service must be exposed on the default port (8080) unless otherwise configured with the port field. + /// Supported formats: - / - docker.io// - quay.io// - quay.io//@sha256: - quay.io//: + pub image: String, + /// service contains configuration for the Service resource created for this sample. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub service: Option, +} + +/// service contains configuration for the Service resource created for this sample. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsoleSampleSourceContainerImportService { + /// targetPort is the port that the service listens on for HTTP requests. This port will be used for Service and Route created for this sample. Port must be in the range 1 to 65535. Default port is 8080. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetPort")] + pub target_port: Option, +} + +/// gitImport allows the user to import code from a git repository. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsoleSampleSourceGitImport { + /// repository contains the reference to the actual Git repository. + pub repository: ConsoleSampleSourceGitImportRepository, + /// service contains configuration for the Service resource created for this sample. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub service: Option, +} + +/// repository contains the reference to the actual Git repository. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsoleSampleSourceGitImportRepository { + /// contextDir is used to specify a directory within the repository to build the component. Must start with `/` and have a maximum length of 256 characters. When omitted, the default value is to build from the root of the repository. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "contextDir")] + pub context_dir: Option, + /// revision is the git revision at which to clone the git repository Can be used to clone a specific branch, tag or commit SHA. Must be at most 256 characters in length. When omitted the repository's default branch is used. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub revision: Option, + /// url of the Git repository that contains a HTTP service. The HTTP service must be exposed on the default port (8080) unless otherwise configured with the port field. + /// Only public repositories on GitHub, GitLab and Bitbucket are currently supported: + /// - https://github.com// - https://gitlab.com// - https://bitbucket.org// + /// The url must have a maximum length of 256 characters. + pub url: String, +} + +/// service contains configuration for the Service resource created for this sample. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsoleSampleSourceGitImportService { + /// targetPort is the port that the service listens on for HTTP requests. This port will be used for Service created for this sample. Port must be in the range 1 to 65535. Default port is 8080. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetPort")] + pub target_port: Option, +} + diff --git a/kube-custom-resources-rs/src/console_openshift_io/v1/consoleyamlsamples.rs b/kube-custom-resources-rs/src/console_openshift_io/v1/consoleyamlsamples.rs new file mode 100644 index 000000000..21e0157fe --- /dev/null +++ b/kube-custom-resources-rs/src/console_openshift_io/v1/consoleyamlsamples.rs @@ -0,0 +1,37 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/console.openshift.io/v1/consoleyamlsamples.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// ConsoleYAMLSampleSpec is the desired YAML sample configuration. Samples will appear with their descriptions in a samples sidebar when creating a resources in the web console. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "console.openshift.io", version = "v1", kind = "ConsoleYAMLSample", plural = "consoleyamlsamples")] +#[kube(schema = "disabled")] +pub struct ConsoleYAMLSampleSpec { + /// description of the YAML sample. + pub description: String, + /// snippet indicates that the YAML sample is not the full YAML resource definition, but a fragment that can be inserted into the existing YAML document at the user's cursor. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub snippet: Option, + /// targetResource contains apiVersion and kind of the resource YAML sample is representating. 
+ #[serde(rename = "targetResource")] + pub target_resource: ConsoleYAMLSampleTargetResource, + /// title of the YAML sample. + pub title: String, + /// yaml is the YAML sample to display. + pub yaml: String, +} + +/// targetResource contains apiVersion and kind of the resource YAML sample is representating. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsoleYAMLSampleTargetResource { + /// APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, +} + diff --git a/kube-custom-resources-rs/src/console_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/console_openshift_io/v1/mod.rs new file mode 100644 index 000000000..7a6b3e225 --- /dev/null +++ b/kube-custom-resources-rs/src/console_openshift_io/v1/mod.rs @@ -0,0 +1,4 @@ +pub mod consoleplugins; +pub mod consolequickstarts; +pub mod consolesamples; +pub mod consoleyamlsamples; diff --git a/kube-custom-resources-rs/src/console_openshift_io/v1alpha1/consoleplugins.rs b/kube-custom-resources-rs/src/console_openshift_io/v1alpha1/consoleplugins.rs new file mode 100644 index 000000000..9d5582b98 --- /dev/null +++ b/kube-custom-resources-rs/src/console_openshift_io/v1alpha1/consoleplugins.rs @@ -0,0 +1,71 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/console.openshift.io/v1alpha1/consoleplugins.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// ConsolePluginSpec is the desired plugin configuration. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "console.openshift.io", version = "v1alpha1", kind = "ConsolePlugin", plural = "consoleplugins")] +#[kube(schema = "disabled")] +pub struct ConsolePluginSpec { + /// displayName is the display name of the plugin. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "displayName")] + pub display_name: Option, + /// proxy is a list of proxies that describe various service type to which the plugin needs to connect to. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub proxy: Option>, + /// service is a Kubernetes Service that exposes the plugin using a deployment with an HTTP server. The Service must use HTTPS and Service serving certificate. The console backend will proxy the plugins assets from the Service using the service CA bundle. + pub service: ConsolePluginService, +} + +/// ConsolePluginProxy holds information on various service types to which console's backend will proxy the plugin's requests. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsolePluginProxy { + /// alias is a proxy name that identifies the plugin's proxy. An alias name should be unique per plugin. The console backend exposes following proxy endpoint: + /// /api/proxy/plugin///? + /// Request example path: + /// /api/proxy/plugin/acm/search/pods?namespace=openshift-apiserver + pub alias: String, + /// authorize indicates if the proxied request should contain the logged-in user's OpenShift access token in the "Authorization" request header. For example: + /// Authorization: Bearer sha256~kV46hPnEYhCWFnB85r5NrprAxggzgb6GOeLbgcKNsH0 + /// By default the access token is not part of the proxied request. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub authorize: Option, + /// caCertificate provides the cert authority certificate contents, in case the proxied Service is using custom service CA. By default, the service CA bundle provided by the service-ca operator is used. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "caCertificate")] + pub ca_certificate: Option, + /// service is an in-cluster Service that the plugin will connect to. The Service must use HTTPS. The console backend exposes an endpoint in order to proxy communication between the plugin and the Service. Note: service field is required for now, since currently only "Service" type is supported. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub service: Option, + /// type is the type of the console plugin's proxy. Currently only "Service" is supported. + #[serde(rename = "type")] + pub r#type: String, +} + +/// service is an in-cluster Service that the plugin will connect to. The Service must use HTTPS. The console backend exposes an endpoint in order to proxy communication between the plugin and the Service. Note: service field is required for now, since currently only "Service" type is supported. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsolePluginProxyService { + /// name of Service that the plugin needs to connect to. + pub name: String, + /// namespace of Service that the plugin needs to connect to + pub namespace: String, + /// port on which the Service that the plugin needs to connect to is listening on. + pub port: i32, +} + +/// service is a Kubernetes Service that exposes the plugin using a deployment with an HTTP server. The Service must use HTTPS and Service serving certificate. The console backend will proxy the plugins assets from the Service using the service CA bundle. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConsolePluginService { + /// basePath is the path to the plugin's assets. The primary asset it the manifest file called `plugin-manifest.json`, which is a JSON document that contains metadata about the plugin and the extensions. + #[serde(rename = "basePath")] + pub base_path: String, + /// name of Service that is serving the plugin assets. + pub name: String, + /// namespace of Service that is serving the plugin assets. + pub namespace: String, + /// port on which the Service that is serving the plugin is listening to. 
+ pub port: i32, +} + diff --git a/kube-custom-resources-rs/src/console_openshift_io/v1alpha1/mod.rs b/kube-custom-resources-rs/src/console_openshift_io/v1alpha1/mod.rs new file mode 100644 index 000000000..260916f4b --- /dev/null +++ b/kube-custom-resources-rs/src/console_openshift_io/v1alpha1/mod.rs @@ -0,0 +1 @@ +pub mod consoleplugins; diff --git a/kube-custom-resources-rs/src/controlplane_operator_openshift_io/mod.rs b/kube-custom-resources-rs/src/controlplane_operator_openshift_io/mod.rs new file mode 100644 index 000000000..32a5a9d4f --- /dev/null +++ b/kube-custom-resources-rs/src/controlplane_operator_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1alpha1; diff --git a/kube-custom-resources-rs/src/controlplane_operator_openshift_io/v1alpha1/mod.rs b/kube-custom-resources-rs/src/controlplane_operator_openshift_io/v1alpha1/mod.rs new file mode 100644 index 000000000..fde1b709e --- /dev/null +++ b/kube-custom-resources-rs/src/controlplane_operator_openshift_io/v1alpha1/mod.rs @@ -0,0 +1 @@ +pub mod podnetworkconnectivitychecks; diff --git a/kube-custom-resources-rs/src/controlplane_operator_openshift_io/v1alpha1/podnetworkconnectivitychecks.rs b/kube-custom-resources-rs/src/controlplane_operator_openshift_io/v1alpha1/podnetworkconnectivitychecks.rs new file mode 100644 index 000000000..1d6063dc9 --- /dev/null +++ b/kube-custom-resources-rs/src/controlplane_operator_openshift_io/v1alpha1/podnetworkconnectivitychecks.rs @@ -0,0 +1,159 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/controlplane.operator.openshift.io/v1alpha1/podnetworkconnectivitychecks.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// Spec defines the source and target of the connectivity check +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = 
"controlplane.operator.openshift.io", version = "v1alpha1", kind = "PodNetworkConnectivityCheck", plural = "podnetworkconnectivitychecks")] +#[kube(namespaced)] +#[kube(status = "PodNetworkConnectivityCheckStatus")] +#[kube(schema = "disabled")] +pub struct PodNetworkConnectivityCheckSpec { + /// SourcePod names the pod from which the condition will be checked + #[serde(rename = "sourcePod")] + pub source_pod: String, + /// EndpointAddress to check. A TCP address of the form host:port. Note that if host is a DNS name, then the check would fail if the DNS name cannot be resolved. Specify an IP address for host to bypass DNS name lookup. + #[serde(rename = "targetEndpoint")] + pub target_endpoint: String, + /// TLSClientCert, if specified, references a kubernetes.io/tls type secret with 'tls.crt' and 'tls.key' entries containing an optional TLS client certificate and key to be used when checking endpoints that require a client certificate in order to gracefully preform the scan without causing excessive logging in the endpoint process. The secret must exist in the same namespace as this resource. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tlsClientCert")] + pub tls_client_cert: Option, +} + +/// TLSClientCert, if specified, references a kubernetes.io/tls type secret with 'tls.crt' and 'tls.key' entries containing an optional TLS client certificate and key to be used when checking endpoints that require a client certificate in order to gracefully preform the scan without causing excessive logging in the endpoint process. The secret must exist in the same namespace as this resource. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct PodNetworkConnectivityCheckTlsClientCert { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// Status contains the observed status of the connectivity check +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct PodNetworkConnectivityCheckStatus { + /// Conditions summarize the status of the check + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// Failures contains logs of unsuccessful check actions + #[serde(default, skip_serializing_if = "Option::is_none")] + pub failures: Option>, + /// Outages contains logs of time periods of outages + #[serde(default, skip_serializing_if = "Option::is_none")] + pub outages: Option>, + /// Successes contains logs successful check actions + #[serde(default, skip_serializing_if = "Option::is_none")] + pub successes: Option>, +} + +/// PodNetworkConnectivityCheckCondition represents the overall status of the pod network connectivity. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct PodNetworkConnectivityCheckStatusConditions { + /// Last time the condition transitioned from one status to another. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// Message indicating details about last transition in a human readable format. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// Reason for the condition's last status transition in a machine readable format. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Status of the condition + pub status: String, + /// Type of the condition + #[serde(rename = "type")] + pub r#type: String, +} + +/// LogEntry records events +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct PodNetworkConnectivityCheckStatusFailures { + /// Latency records how long the action mentioned in the entry took. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub latency: Option, + /// Message explaining status in a human readable format. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// Reason for status in a machine readable format. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Success indicates if the log entry indicates a success or failure. + pub success: bool, + /// Start time of check action. + pub time: String, +} + +/// OutageEntry records time period of an outage +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct PodNetworkConnectivityCheckStatusOutages { + /// End of outage detected + #[serde(default, skip_serializing_if = "Option::is_none")] + pub end: Option, + /// EndLogs contains log entries related to the end of this outage. Should contain the success entry that resolved the outage and possibly a few of the failure log entries that preceded it. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "endLogs")] + pub end_logs: Option>, + /// Message summarizes outage details in a human readable format. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// Start of outage detected + pub start: String, + /// StartLogs contains log entries related to the start of this outage. Should contain the original failure, any entries where the failure mode changed. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "startLogs")] + pub start_logs: Option>, +} + +/// LogEntry records events +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct PodNetworkConnectivityCheckStatusOutagesEndLogs { + /// Latency records how long the action mentioned in the entry took. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub latency: Option, + /// Message explaining status in a human readable format. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// Reason for status in a machine readable format. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Success indicates if the log entry indicates a success or failure. + pub success: bool, + /// Start time of check action. + pub time: String, +} + +/// LogEntry records events +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct PodNetworkConnectivityCheckStatusOutagesStartLogs { + /// Latency records how long the action mentioned in the entry took. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub latency: Option, + /// Message explaining status in a human readable format. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// Reason for status in a machine readable format. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Success indicates if the log entry indicates a success or failure. + pub success: bool, + /// Start time of check action. + pub time: String, +} + +/// LogEntry records events +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct PodNetworkConnectivityCheckStatusSuccesses { + /// Latency records how long the action mentioned in the entry took. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub latency: Option, + /// Message explaining status in a human readable format. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// Reason for status in a machine readable format. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Success indicates if the log entry indicates a success or failure. + pub success: bool, + /// Start time of check action. + pub time: String, +} + diff --git a/kube-custom-resources-rs/src/example_openshift_io/mod.rs b/kube-custom-resources-rs/src/example_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/example_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/example_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/example_openshift_io/v1/mod.rs new file mode 100644 index 000000000..7c482db72 --- /dev/null +++ b/kube-custom-resources-rs/src/example_openshift_io/v1/mod.rs @@ -0,0 +1 @@ +pub mod stableconfigtypes; diff --git a/kube-custom-resources-rs/src/example_openshift_io/v1/stableconfigtypes.rs b/kube-custom-resources-rs/src/example_openshift_io/v1/stableconfigtypes.rs new file mode 100644 index 000000000..4bf8a4721 --- /dev/null +++ b/kube-custom-resources-rs/src/example_openshift_io/v1/stableconfigtypes.rs @@ -0,0 +1,112 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/example.openshift.io/v1/stableconfigtypes.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec is the specification of the desired behavior of the StableConfigType. 
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "example.openshift.io", version = "v1", kind = "StableConfigType", plural = "stableconfigtypes")] +#[kube(status = "StableConfigTypeStatus")] +#[kube(schema = "disabled")] +pub struct StableConfigTypeSpec { + /// celUnion demonstrates how to validate a discrminated union using CEL + #[serde(default, skip_serializing_if = "Option::is_none", rename = "celUnion")] + pub cel_union: Option, + /// evolvingUnion demonstrates how to phase in new values into discriminated union + #[serde(default, skip_serializing_if = "Option::is_none", rename = "evolvingUnion")] + pub evolving_union: Option, + /// immutableField is a field that is immutable once the object has been created. It is required at all times. + #[serde(rename = "immutableField")] + pub immutable_field: String, + /// optionalImmutableField is a field that is immutable once set. It is optional but may not be changed once set. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "optionalImmutableField")] + pub optional_immutable_field: Option, + /// stableField is a field that is present on default clusters and on tech preview clusters + /// If empty, the platform will choose a good default, which may change over time without notice. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "stableField")] + pub stable_field: Option, +} + +/// celUnion demonstrates how to validate a discrminated union using CEL +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct StableConfigTypeCelUnion { + /// optionalMember is a union member that is optional. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "optionalMember")] + pub optional_member: Option, + /// requiredMember is a union member that is required. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredMember")] + pub required_member: Option, + /// type determines which of the union members should be populated. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// celUnion demonstrates how to validate a discrminated union using CEL +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum StableConfigTypeCelUnionType { + RequiredMember, + OptionalMember, + EmptyMember, +} + +/// evolvingUnion demonstrates how to phase in new values into discriminated union +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct StableConfigTypeEvolvingUnion { + /// type is the discriminator. It has different values for Default and for TechPreviewNoUpgrade + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// evolvingUnion demonstrates how to phase in new values into discriminated union +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum StableConfigTypeEvolvingUnionType { + #[serde(rename = "")] + KopiumEmpty, + StableValue, +} + +/// status is the most recently observed status of the StableConfigType. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct StableConfigTypeStatus { + /// Represents the observations of a foo's current state. Known .status.conditions.type are: "Available", "Progressing", and "Degraded" + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// immutableField is a field that is immutable once the object has been created. It is required at all times. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "immutableField")] + pub immutable_field: Option, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. 
For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct StableConfigTypeStatusConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. + pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + pub reason: String, + /// status of the condition, one of True, False, Unknown. + pub status: StableConfigTypeStatusConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum StableConfigTypeStatusConditionsStatus { + True, + False, + Unknown, +} + diff --git a/kube-custom-resources-rs/src/helm_openshift_io/mod.rs b/kube-custom-resources-rs/src/helm_openshift_io/mod.rs new file mode 100644 index 000000000..9f64fc82d --- /dev/null +++ b/kube-custom-resources-rs/src/helm_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1beta1; diff --git a/kube-custom-resources-rs/src/helm_openshift_io/v1beta1/helmchartrepositories.rs b/kube-custom-resources-rs/src/helm_openshift_io/v1beta1/helmchartrepositories.rs new file mode 100644 index 000000000..ef677807c --- /dev/null +++ b/kube-custom-resources-rs/src/helm_openshift_io/v1beta1/helmchartrepositories.rs @@ -0,0 +1,95 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/helm.openshift.io/v1beta1/helmchartrepositories.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; 
+ +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "helm.openshift.io", version = "v1beta1", kind = "HelmChartRepository", plural = "helmchartrepositories")] +#[kube(status = "HelmChartRepositoryStatus")] +#[kube(schema = "disabled")] +pub struct HelmChartRepositorySpec { + /// Required configuration for connecting to the chart repo + #[serde(default, skip_serializing_if = "Option::is_none", rename = "connectionConfig")] + pub connection_config: Option, + /// Optional human readable repository description, it can be used by UI for displaying purposes + #[serde(default, skip_serializing_if = "Option::is_none")] + pub description: Option, + /// If set to true, disable the repo usage in the cluster/namespace + #[serde(default, skip_serializing_if = "Option::is_none")] + pub disabled: Option, + /// Optional associated human readable repository name, it can be used by UI for displaying purposes + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +/// Required configuration for connecting to the chart repo +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct HelmChartRepositoryConnectionConfig { + /// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca-bundle.crt" is used to locate the data. If empty, the default system roots are used. The namespace for this config map is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ca: Option, + /// tlsClientConfig is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate and private key to present when connecting to the server. The key "tls.crt" is used to locate the client certificate. The key "tls.key" is used to locate the private key. 
The namespace for this secret is openshift-config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tlsClientConfig")] + pub tls_client_config: Option, + /// Chart repository URL + #[serde(default, skip_serializing_if = "Option::is_none")] + pub url: Option, +} + +/// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca-bundle.crt" is used to locate the data. If empty, the default system roots are used. The namespace for this config map is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct HelmChartRepositoryConnectionConfigCa { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// tlsClientConfig is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate and private key to present when connecting to the server. The key "tls.crt" is used to locate the client certificate. The key "tls.key" is used to locate the private key. The namespace for this secret is openshift-config. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct HelmChartRepositoryConnectionConfigTlsClientConfig { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// Observed status of the repository within the cluster.. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct HelmChartRepositoryStatus { + /// conditions is a list of conditions and their statuses + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct HelmChartRepositoryStatusConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. + pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + pub reason: String, + /// status of the condition, one of True, False, Unknown. + pub status: HelmChartRepositoryStatusConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum HelmChartRepositoryStatusConditionsStatus { + True, + False, + Unknown, +} + diff --git a/kube-custom-resources-rs/src/helm_openshift_io/v1beta1/mod.rs b/kube-custom-resources-rs/src/helm_openshift_io/v1beta1/mod.rs new file mode 100644 index 000000000..f7792d7a7 --- /dev/null +++ b/kube-custom-resources-rs/src/helm_openshift_io/v1beta1/mod.rs @@ -0,0 +1,2 @@ +pub mod helmchartrepositories; +pub mod projecthelmchartrepositories; diff --git a/kube-custom-resources-rs/src/helm_openshift_io/v1beta1/projecthelmchartrepositories.rs b/kube-custom-resources-rs/src/helm_openshift_io/v1beta1/projecthelmchartrepositories.rs new file mode 100644 index 000000000..c8897e143 --- /dev/null +++ b/kube-custom-resources-rs/src/helm_openshift_io/v1beta1/projecthelmchartrepositories.rs @@ -0,0 +1,106 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/helm.openshift.io/v1beta1/projecthelmchartrepositories.yaml 
--derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "helm.openshift.io", version = "v1beta1", kind = "ProjectHelmChartRepository", plural = "projecthelmchartrepositories")] +#[kube(namespaced)] +#[kube(status = "ProjectHelmChartRepositoryStatus")] +#[kube(schema = "disabled")] +pub struct ProjectHelmChartRepositorySpec { + /// Required configuration for connecting to the chart repo + #[serde(default, skip_serializing_if = "Option::is_none", rename = "connectionConfig")] + pub connection_config: Option, + /// Optional human readable repository description, it can be used by UI for displaying purposes + #[serde(default, skip_serializing_if = "Option::is_none")] + pub description: Option, + /// If set to true, disable the repo usage in the namespace + #[serde(default, skip_serializing_if = "Option::is_none")] + pub disabled: Option, + /// Optional associated human readable repository name, it can be used by UI for displaying purposes + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +/// Required configuration for connecting to the chart repo +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ProjectHelmChartRepositoryConnectionConfig { + /// basicAuthConfig is an optional reference to a secret by name that contains the basic authentication credentials to present when connecting to the server. The key "username" is used locate the username. The key "password" is used to locate the password. The namespace for this secret must be same as the namespace where the project helm chart repository is getting instantiated. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "basicAuthConfig")] + pub basic_auth_config: Option, + /// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca-bundle.crt" is used to locate the data. If empty, the default system roots are used. The namespace for this configmap must be same as the namespace where the project helm chart repository is getting instantiated. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ca: Option, + /// tlsClientConfig is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate and private key to present when connecting to the server. The key "tls.crt" is used to locate the client certificate. The key "tls.key" is used to locate the private key. The namespace for this secret must be same as the namespace where the project helm chart repository is getting instantiated. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tlsClientConfig")] + pub tls_client_config: Option, + /// Chart repository URL + #[serde(default, skip_serializing_if = "Option::is_none")] + pub url: Option, +} + +/// basicAuthConfig is an optional reference to a secret by name that contains the basic authentication credentials to present when connecting to the server. The key "username" is used locate the username. The key "password" is used to locate the password. The namespace for this secret must be same as the namespace where the project helm chart repository is getting instantiated. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ProjectHelmChartRepositoryConnectionConfigBasicAuthConfig { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. 
It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca-bundle.crt" is used to locate the data. If empty, the default system roots are used. The namespace for this configmap must be same as the namespace where the project helm chart repository is getting instantiated. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ProjectHelmChartRepositoryConnectionConfigCa { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// tlsClientConfig is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate and private key to present when connecting to the server. The key "tls.crt" is used to locate the client certificate. The key "tls.key" is used to locate the private key. The namespace for this secret must be same as the namespace where the project helm chart repository is getting instantiated. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ProjectHelmChartRepositoryConnectionConfigTlsClientConfig { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// Observed status of the repository within the namespace.. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ProjectHelmChartRepositoryStatus { + /// conditions is a list of conditions and their statuses + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ProjectHelmChartRepositoryStatusConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. + pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + pub reason: String, + /// status of the condition, one of True, False, Unknown. + pub status: ProjectHelmChartRepositoryStatusConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ProjectHelmChartRepositoryStatusConditionsStatus { + True, + False, + Unknown, +} + diff --git a/kube-custom-resources-rs/src/imageregistry_operator_openshift_io/mod.rs b/kube-custom-resources-rs/src/imageregistry_operator_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/imageregistry_operator_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/imageregistry_operator_openshift_io/v1/configs.rs b/kube-custom-resources-rs/src/imageregistry_operator_openshift_io/v1/configs.rs new file mode 100644 index 000000000..feb4c67e5 --- /dev/null +++ b/kube-custom-resources-rs/src/imageregistry_operator_openshift_io/v1/configs.rs @@ -0,0 +1,1310 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/imageregistry.operator.openshift.io/v1/configs.yaml --derive=Default --derive=PartialEq +// kopium version: 
0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; +use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; + +/// ImageRegistrySpec defines the specs for the running registry. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "imageregistry.operator.openshift.io", version = "v1", kind = "Config", plural = "configs")] +#[kube(status = "ConfigStatus")] +#[kube(schema = "disabled")] +pub struct ConfigSpec { + /// affinity is a group of node affinity scheduling rules for the image registry pod(s). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub affinity: Option, + /// defaultRoute indicates whether an external facing route for the registry should be created using the default generated hostname. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultRoute")] + pub default_route: Option, + /// disableRedirect controls whether to route all data through the Registry, rather than redirecting to the backend. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "disableRedirect")] + pub disable_redirect: Option, + /// httpSecret is the value needed by the registry to secure uploads, generated by default. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpSecret")] + pub http_secret: Option, + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// logging is deprecated, use logLevel instead. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub logging: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// nodeSelector defines the node selection constraints for the registry pod. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeSelector")] + pub node_selector: Option>, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// proxy defines the proxy to be used when calling master api, upstream registries, etc. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub proxy: Option, + /// readOnly indicates whether the registry instance should reject attempts to push new images or delete existing ones. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] + pub read_only: Option, + /// replicas determines the number of registry instances to run. + pub replicas: i32, + /// requests controls how many parallel requests a given registry instance will handle before queuing additional requests. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub requests: Option, + /// resources defines the resource requests+limits for the registry pod. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub resources: Option, + /// rolloutStrategy defines rollout strategy for the image registry deployment. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "rolloutStrategy")] + pub rollout_strategy: Option, + /// routes defines additional external facing routes which should be created for the registry. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routes: Option>, + /// storage details for configuring registry storage, e.g. S3 bucket coordinates. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub storage: Option, + /// tolerations defines the tolerations for the registry pod. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, + /// topologySpreadConstraints specify how to spread matching pods among the given topology. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "topologySpreadConstraints")] + pub topology_spread_constraints: Option>, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +/// affinity is a group of node affinity scheduling rules for the image registry pod(s). +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinity { + /// Describes node affinity scheduling rules for the pod. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeAffinity")] + pub node_affinity: Option, + /// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "podAffinity")] + pub pod_affinity: Option, + /// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "podAntiAffinity")] + pub pod_anti_affinity: Option, +} + +/// Describes node affinity scheduling rules for the pod. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityNodeAffinity { + /// The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] + pub preferred_during_scheduling_ignored_during_execution: Option>, + /// If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] + pub required_during_scheduling_ignored_during_execution: Option, +} + +/// An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution { + /// A node selector term, associated with the corresponding weight. + pub preference: ConfigAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference, + /// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + pub weight: i32, +} + +/// A node selector term, associated with the corresponding weight. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference { + /// A list of node selector requirements by node's labels. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// A list of node selector requirements by node's fields. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchFields")] + pub match_fields: Option>, +} + +/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions { + /// The label key that the selector applies to. + pub key: String, + /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + pub operator: String, + /// An array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields { + /// The label key that the selector applies to. + pub key: String, + /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + pub operator: String, + /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution { + /// Required. A list of node selector terms. The terms are ORed. 
+ #[serde(rename = "nodeSelectorTerms")] + pub node_selector_terms: Vec, +} + +/// A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms { + /// A list of node selector requirements by node's labels. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// A list of node selector requirements by node's fields. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchFields")] + pub match_fields: Option>, +} + +/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions { + /// The label key that the selector applies to. + pub key: String, + /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + pub operator: String, + /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields { + /// The label key that the selector applies to. + pub key: String, + /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + pub operator: String, + /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAffinity { + /// The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] + pub preferred_during_scheduling_ignored_during_execution: Option>, + /// If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] + pub required_during_scheduling_ignored_during_execution: Option>, +} + +/// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution { + /// Required. A pod affinity term, associated with the corresponding weight. + #[serde(rename = "podAffinityTerm")] + pub pod_affinity_term: ConfigAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm, + /// weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + pub weight: i32, +} + +/// Required. A pod affinity term, associated with the corresponding weight. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm { + /// A label query over a set of resources, in this case pods. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespaces: Option>, + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + #[serde(rename = "topologyKey")] + pub topology_key: String, +} + +/// A label query over a set of resources, in this case pods. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution { + /// A label query over a set of resources, in this case pods. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + /// A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespaces: Option>, + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + #[serde(rename = "topologyKey")] + pub topology_key: String, +} + +/// A label query over a set of resources, in this case pods. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAntiAffinity { + /// The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] + pub preferred_during_scheduling_ignored_during_execution: Option>, + /// If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] + pub required_during_scheduling_ignored_during_execution: Option>, +} + +/// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution { + /// Required. A pod affinity term, associated with the corresponding weight. + #[serde(rename = "podAffinityTerm")] + pub pod_affinity_term: ConfigAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm, + /// weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + pub weight: i32, +} + +/// Required. A pod affinity term, associated with the corresponding weight. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm { + /// A label query over a set of resources, in this case pods. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespaces: Option>, + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + #[serde(rename = "topologyKey")] + pub topology_key: String, +} + +/// A label query over a set of resources, in this case pods. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution { + /// A label query over a set of resources, in this case pods. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + /// A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespaces: Option>, + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + #[serde(rename = "topologyKey")] + pub topology_key: String, +} + +/// A label query over a set of resources, in this case pods. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// ImageRegistrySpec defines the specs for the running registry. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConfigLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// ImageRegistrySpec defines the specs for the running registry. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConfigOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// proxy defines the proxy to be used when calling master api, upstream registries, etc. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigProxy { + /// http defines the proxy to be used by the image registry when accessing HTTP endpoints. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub http: Option, + /// https defines the proxy to be used by the image registry when accessing HTTPS endpoints. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub https: Option, + /// noProxy defines a comma-separated list of host names that shouldn't go through any proxy. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "noProxy")] + pub no_proxy: Option, +} + +/// requests controls how many parallel requests a given registry instance will handle before queuing additional requests. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigRequests { + /// read defines limits for image registry's reads. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub read: Option, + /// write defines limits for image registry's writes. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub write: Option, +} + +/// read defines limits for image registry's reads. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigRequestsRead { + /// maxInQueue sets the maximum queued api requests to the registry. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxInQueue")] + pub max_in_queue: Option, + /// maxRunning sets the maximum in flight api requests to the registry. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxRunning")] + pub max_running: Option, + /// maxWaitInQueue sets the maximum time a request can wait in the queue before being rejected. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxWaitInQueue")] + pub max_wait_in_queue: Option, +} + +/// write defines limits for image registry's writes. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigRequestsWrite { + /// maxInQueue sets the maximum queued api requests to the registry. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxInQueue")] + pub max_in_queue: Option, + /// maxRunning sets the maximum in flight api requests to the registry. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxRunning")] + pub max_running: Option, + /// maxWaitInQueue sets the maximum time a request can wait in the queue before being rejected. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxWaitInQueue")] + pub max_wait_in_queue: Option, +} + +/// resources defines the resource requests+limits for the registry pod. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigResources { + /// Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + /// This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + /// This field is immutable. It can only be set for containers. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub claims: Option>, + /// Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub limits: Option>, + /// Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub requests: Option>, +} + +/// ResourceClaim references one entry in PodSpec.ResourceClaims. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigResourcesClaims { + /// Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + pub name: String, +} + +/// ImageRegistryConfigRoute holds information on external route access to image registry. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigRoutes { + /// hostname for the route. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hostname: Option, + /// name of the route to be created. + pub name: String, + /// secretName points to secret containing the certificates to be used by the route. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretName")] + pub secret_name: Option, +} + +/// storage details for configuring registry storage, e.g. S3 bucket coordinates. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorage { + /// azure represents configuration that uses Azure Blob Storage. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub azure: Option, + /// emptyDir represents ephemeral storage on the pod's host node. WARNING: this storage cannot be used with more than 1 replica and is not suitable for production use. When the pod is removed from a node for any reason, the data in the emptyDir is deleted forever. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "emptyDir")] + pub empty_dir: Option, + /// gcs represents configuration that uses Google Cloud Storage. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gcs: Option, + /// ibmcos represents configuration that uses IBM Cloud Object Storage. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ibmcos: Option, + /// managementState indicates if the operator manages the underlying storage unit. If Managed the operator will remove the storage when this operator gets Removed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// Oss represents configuration that uses Alibaba Cloud Object Storage Service. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub oss: Option, + /// pvc represents configuration that uses a PersistentVolumeClaim. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub pvc: Option, + /// s3 represents configuration that uses Amazon Simple Storage Service. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub s3: Option, + /// swift represents configuration that uses OpenStack Object Storage. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub swift: Option, +} + +/// azure represents configuration that uses Azure Blob Storage. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorageAzure { + /// accountName defines the account to be used by the registry. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "accountName")] + pub account_name: Option, + /// cloudName is the name of the Azure cloud environment to be used by the registry. If empty, the operator will set it based on the infrastructure object. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloudName")] + pub cloud_name: Option, + /// container defines Azure's container to be used by registry. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub container: Option, + /// networkAccess defines the network access properties for the storage account. Defaults to type: External. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkAccess")] + pub network_access: Option, +} + +/// networkAccess defines the network access properties for the storage account. Defaults to type: External. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorageAzureNetworkAccess { + /// internal defines the vnet and subnet names to configure a private endpoint and connect it to the storage account in order to make it private. when type: Internal and internal is unset, the image registry operator will discover vnet and subnet names, and generate a private endpoint name. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub internal: Option, + /// type is the network access level to be used for the storage account. type: Internal means the storage account will be private, type: External means the storage account will be publicly accessible. Internal storage accounts are only exposed within the cluster's vnet. External storage accounts are publicly exposed on the internet. When type: Internal is used, a vnetName, subNetName and privateEndpointName may optionally be specified. If unspecificed, the image registry operator will discover vnet and subnet names, and generate a privateEndpointName. Defaults to "External". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// internal defines the vnet and subnet names to configure a private endpoint and connect it to the storage account in order to make it private. when type: Internal and internal is unset, the image registry operator will discover vnet and subnet names, and generate a private endpoint name. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorageAzureNetworkAccessInternal { + /// networkResourceGroupName is the resource group name where the cluster's vnet and subnet are. When omitted, the registry operator will use the cluster resource group (from in the infrastructure status). If you set a networkResourceGroupName on your install-config.yaml, that value will be used automatically (for clusters configured with publish:Internal). Note that both vnet and subnet must be in the same resource group. It must be between 1 and 90 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_), and not end with a period. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkResourceGroupName")] + pub network_resource_group_name: Option, + /// privateEndpointName is the name of the private endpoint for the registry. When provided, the registry will use it as the name of the private endpoint it will create for the storage account. When omitted, the registry will generate one. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). It must start with an alphanumeric character and end with an alphanumeric character or an underscore. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "privateEndpointName")] + pub private_endpoint_name: Option, + /// subnetName is the name of the subnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource, then using one of listed subnets. Advanced cluster network configurations that use network security groups to protect subnets should ensure the provided subnetName has access to Azure Storage service. It must be between 1 and 80 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetName")] + pub subnet_name: Option, + /// vnetName is the name of the vnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource. This tag is set automatically by the installer. Commonly, this will be the same vnet as the cluster. Advanced cluster network configurations should ensure the provided vnetName is the vnet of the nodes where the image registry pods are running from. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). 
It must start with an alphanumeric character and end with an alphanumeric character or an underscore. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "vnetName")] + pub vnet_name: Option, +} + +/// networkAccess defines the network access properties for the storage account. Defaults to type: External. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConfigStorageAzureNetworkAccessType { + Internal, + External, +} + +/// emptyDir represents ephemeral storage on the pod's host node. WARNING: this storage cannot be used with more than 1 replica and is not suitable for production use. When the pod is removed from a node for any reason, the data in the emptyDir is deleted forever. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorageEmptyDir { +} + +/// gcs represents configuration that uses Google Cloud Storage. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorageGcs { + /// bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub bucket: Option, + /// keyID is the KMS key ID to use for encryption. Optional, buckets are encrypted by default on GCP. This allows for the use of a custom encryption key. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "keyID")] + pub key_id: Option, + /// projectID is the Project ID of the GCP project that this bucket should be associated with. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "projectID")] + pub project_id: Option, + /// region is the GCS location in which your bucket exists. Optional, will be set based on the installed GCS Region. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub region: Option, +} + +/// ibmcos represents configuration that uses IBM Cloud Object Storage. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorageIbmcos { + /// bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub bucket: Option, + /// location is the IBM Cloud location in which your bucket exists. Optional, will be set based on the installed IBM Cloud location. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub location: Option, + /// resourceGroupName is the name of the IBM Cloud resource group that this bucket and its service instance is associated with. Optional, will be set based on the installed IBM Cloud resource group. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceGroupName")] + pub resource_group_name: Option, + /// resourceKeyCRN is the CRN of the IBM Cloud resource key that is created for the service instance. Commonly referred as a service credential and must contain HMAC type credentials. Optional, will be computed if not provided. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceKeyCRN")] + pub resource_key_crn: Option, + /// serviceInstanceCRN is the CRN of the IBM Cloud Object Storage service instance that this bucket is associated with. Optional, will be computed if not provided. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceInstanceCRN")] + pub service_instance_crn: Option, +} + +/// Oss represents configuration that uses Alibaba Cloud Object Storage Service. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorageOss { + /// Bucket is the bucket name in which you want to store the registry's data. 
About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm) Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be autogenerated in the form of -image-registry-- + #[serde(default, skip_serializing_if = "Option::is_none")] + pub bucket: Option, + /// Encryption specifies whether you would like your data encrypted on the server side. More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm) + #[serde(default, skip_serializing_if = "Option::is_none")] + pub encryption: Option, + /// EndpointAccessibility specifies whether the registry use the OSS VPC internal endpoint Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `Internal`. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "endpointAccessibility")] + pub endpoint_accessibility: Option, + /// Region is the Alibaba Cloud Region in which your bucket exists. For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html). Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be based on the installed Alibaba Cloud Region. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub region: Option, +} + +/// Encryption specifies whether you would like your data encrypted on the server side. 
More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm) +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorageOssEncryption { + /// KMS (key management service) is an encryption type that holds the struct for KMS KeyID + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kms: Option, + /// Method defines the different encrytion modes available Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `AES256`. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub method: Option, +} + +/// KMS (key management service) is an encryption type that holds the struct for KMS KeyID +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorageOssEncryptionKms { + /// KeyID holds the KMS encryption key ID + #[serde(rename = "keyID")] + pub key_id: String, +} + +/// Encryption specifies whether you would like your data encrypted on the server side. More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm) +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConfigStorageOssEncryptionMethod { + #[serde(rename = "KMS")] + Kms, + #[serde(rename = "AES256")] + Aes256, +} + +/// Oss represents configuration that uses Alibaba Cloud Object Storage Service. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConfigStorageOssEndpointAccessibility { + Internal, + Public, + #[serde(rename = "")] + KopiumEmpty, +} + +/// pvc represents configuration that uses a PersistentVolumeClaim. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStoragePvc { + /// claim defines the Persisent Volume Claim's name to be used. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub claim: Option, +} + +/// s3 represents configuration that uses Amazon Simple Storage Service. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorageS3 { + /// bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub bucket: Option, + /// cloudFront configures Amazon Cloudfront as the storage middleware in a registry. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloudFront")] + pub cloud_front: Option, + /// encrypt specifies whether the registry stores the image in encrypted format or not. Optional, defaults to false. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub encrypt: Option, + /// keyID is the KMS key ID to use for encryption. Optional, Encrypt must be true, or this parameter is ignored. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "keyID")] + pub key_id: Option, + /// region is the AWS region in which your bucket exists. Optional, will be set based on the installed AWS Region. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub region: Option, + /// regionEndpoint is the endpoint for S3 compatible storage services. It should be a valid URL with scheme, e.g. https://s3.example.com. Optional, defaults based on the Region that is provided. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "regionEndpoint")] + pub region_endpoint: Option, + /// trustedCA is a reference to a config map containing a CA bundle. The image registry and its operator use certificates from this bundle to verify S3 server certificates. + /// The namespace for the config map referenced by trustedCA is "openshift-config". The key for the bundle in the config map is "ca-bundle.crt". 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "trustedCA")] + pub trusted_ca: Option, + /// virtualHostedStyle enables using S3 virtual hosted style bucket paths with a custom RegionEndpoint Optional, defaults to false. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "virtualHostedStyle")] + pub virtual_hosted_style: Option, +} + +/// cloudFront configures Amazon Cloudfront as the storage middleware in a registry. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorageS3CloudFront { + /// baseURL contains the SCHEME://HOST[/PATH] at which Cloudfront is served. + #[serde(rename = "baseURL")] + pub base_url: String, + /// duration is the duration of the Cloudfront session. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub duration: Option, + /// keypairID is key pair ID provided by AWS. + #[serde(rename = "keypairID")] + pub keypair_id: String, + /// privateKey points to secret containing the private key, provided by AWS. + #[serde(rename = "privateKey")] + pub private_key: ConfigStorageS3CloudFrontPrivateKey, +} + +/// privateKey points to secret containing the private key, provided by AWS. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorageS3CloudFrontPrivateKey { + /// The key of the secret to select from. Must be a valid secret key. + pub key: String, + /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Specify whether the Secret or its key must be defined + #[serde(default, skip_serializing_if = "Option::is_none")] + pub optional: Option, +} + +/// trustedCA is a reference to a config map containing a CA bundle. The image registry and its operator use certificates from this bundle to verify S3 server certificates. 
+/// The namespace for the config map referenced by trustedCA is "openshift-config". The key for the bundle in the config map is "ca-bundle.crt". +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorageS3TrustedCa { + /// name is the metadata.name of the referenced config map. This field must adhere to standard config map naming restrictions. The name must consist solely of alphanumeric characters, hyphens (-) and periods (.). It has a maximum length of 253 characters. If this field is not specified or is empty string, the default trust bundle will be used. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +/// swift represents configuration that uses OpenStack Object Storage. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStorageSwift { + /// authURL defines the URL for obtaining an authentication token. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "authURL")] + pub auth_url: Option, + /// authVersion specifies the OpenStack Auth's version. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "authVersion")] + pub auth_version: Option, + /// container defines the name of Swift container where to store the registry's data. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub container: Option, + /// domain specifies Openstack's domain name for Identity v3 API. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub domain: Option, + /// domainID specifies Openstack's domain id for Identity v3 API. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "domainID")] + pub domain_id: Option, + /// regionName defines Openstack's region in which container exists. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "regionName")] + pub region_name: Option, + /// tenant defines Openstack tenant name to be used by registry. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub tenant: Option, + /// tenant defines Openstack tenant id to be used by registry. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tenantID")] + pub tenant_id: Option, +} + +/// The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigTolerations { + /// Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub effect: Option, + /// Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + /// Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + /// TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tolerationSeconds")] + pub toleration_seconds: Option, + /// Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +/// TopologySpreadConstraint specifies how to spread matching pods among the given topology. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigTopologySpreadConstraints { + /// LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + /// MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. + /// This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] + pub match_label_keys: Option>, + /// MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. 
| zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed. + #[serde(rename = "maxSkew")] + pub max_skew: i32, + /// MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + /// For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. + /// This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "minDomains")] + pub min_domains: Option, + /// NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + /// If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeAffinityPolicy")] + pub node_affinity_policy: Option, + /// NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. + /// If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeTaintsPolicy")] + pub node_taints_policy: Option, + /// TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. 
+ #[serde(rename = "topologyKey")] + pub topology_key: String, + /// WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. + #[serde(rename = "whenUnsatisfiable")] + pub when_unsatisfiable: String, +} + +/// LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigTopologySpreadConstraintsLabelSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigTopologySpreadConstraintsLabelSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// ImageRegistryStatus reports image registry operational status. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// storage indicates the current applied storage configuration of the registry. 
+ pub storage: ConfigStatusStorage, + /// storageManaged is deprecated, please refer to Storage.managementState + #[serde(rename = "storageManaged")] + pub storage_managed: bool, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + +/// storage indicates the current applied storage configuration of the registry. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorage { + /// azure represents configuration that uses Azure Blob Storage. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub azure: Option, + /// emptyDir represents ephemeral storage on the pod's host node. WARNING: this storage cannot be used with more than 1 replica and is not suitable for production use. When the pod is removed from a node for any reason, the data in the emptyDir is deleted forever. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "emptyDir")] + pub empty_dir: Option, + /// gcs represents configuration that uses Google Cloud Storage. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub gcs: Option, + /// ibmcos represents configuration that uses IBM Cloud Object Storage. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ibmcos: Option, + /// managementState indicates if the operator manages the underlying storage unit. If Managed the operator will remove the storage when this operator gets Removed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// Oss represents configuration that uses Alibaba Cloud Object Storage Service. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub oss: Option, + /// pvc represents configuration that uses a PersistentVolumeClaim. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub pvc: Option, + /// s3 represents configuration that uses Amazon Simple Storage Service. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub s3: Option, + /// swift represents configuration that uses OpenStack Object Storage. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub swift: Option, +} + +/// azure represents configuration that uses Azure Blob Storage. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorageAzure { + /// accountName defines the account to be used by the registry. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "accountName")] + pub account_name: Option, + /// cloudName is the name of the Azure cloud environment to be used by the registry. If empty, the operator will set it based on the infrastructure object. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloudName")] + pub cloud_name: Option, + /// container defines Azure's container to be used by registry. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub container: Option, + /// networkAccess defines the network access properties for the storage account. Defaults to type: External. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkAccess")] + pub network_access: Option, +} + +/// networkAccess defines the network access properties for the storage account. Defaults to type: External. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorageAzureNetworkAccess { + /// internal defines the vnet and subnet names to configure a private endpoint and connect it to the storage account in order to make it private. when type: Internal and internal is unset, the image registry operator will discover vnet and subnet names, and generate a private endpoint name. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub internal: Option, + /// type is the network access level to be used for the storage account. type: Internal means the storage account will be private, type: External means the storage account will be publicly accessible. Internal storage accounts are only exposed within the cluster's vnet. External storage accounts are publicly exposed on the internet. When type: Internal is used, a vnetName, subNetName and privateEndpointName may optionally be specified. If unspecificed, the image registry operator will discover vnet and subnet names, and generate a privateEndpointName. Defaults to "External". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// internal defines the vnet and subnet names to configure a private endpoint and connect it to the storage account in order to make it private. when type: Internal and internal is unset, the image registry operator will discover vnet and subnet names, and generate a private endpoint name. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorageAzureNetworkAccessInternal { + /// networkResourceGroupName is the resource group name where the cluster's vnet and subnet are. When omitted, the registry operator will use the cluster resource group (from in the infrastructure status). If you set a networkResourceGroupName on your install-config.yaml, that value will be used automatically (for clusters configured with publish:Internal). Note that both vnet and subnet must be in the same resource group. It must be between 1 and 90 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_), and not end with a period. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkResourceGroupName")] + pub network_resource_group_name: Option, + /// privateEndpointName is the name of the private endpoint for the registry. When provided, the registry will use it as the name of the private endpoint it will create for the storage account. When omitted, the registry will generate one. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). It must start with an alphanumeric character and end with an alphanumeric character or an underscore. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "privateEndpointName")] + pub private_endpoint_name: Option, + /// subnetName is the name of the subnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource, then using one of listed subnets. Advanced cluster network configurations that use network security groups to protect subnets should ensure the provided subnetName has access to Azure Storage service. 
It must be between 1 and 80 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetName")] + pub subnet_name: Option, + /// vnetName is the name of the vnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource. This tag is set automatically by the installer. Commonly, this will be the same vnet as the cluster. Advanced cluster network configurations should ensure the provided vnetName is the vnet of the nodes where the image registry pods are running from. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). It must start with an alphanumeric character and end with an alphanumeric character or an underscore. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "vnetName")] + pub vnet_name: Option, +} + +/// networkAccess defines the network access properties for the storage account. Defaults to type: External. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConfigStatusStorageAzureNetworkAccessType { + Internal, + External, +} + +/// emptyDir represents ephemeral storage on the pod's host node. WARNING: this storage cannot be used with more than 1 replica and is not suitable for production use. When the pod is removed from a node for any reason, the data in the emptyDir is deleted forever. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorageEmptyDir { +} + +/// gcs represents configuration that uses Google Cloud Storage. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorageGcs { + /// bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub bucket: Option, + /// keyID is the KMS key ID to use for encryption. Optional, buckets are encrypted by default on GCP. This allows for the use of a custom encryption key. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "keyID")] + pub key_id: Option, + /// projectID is the Project ID of the GCP project that this bucket should be associated with. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "projectID")] + pub project_id: Option, + /// region is the GCS location in which your bucket exists. Optional, will be set based on the installed GCS Region. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub region: Option, +} + +/// ibmcos represents configuration that uses IBM Cloud Object Storage. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorageIbmcos { + /// bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub bucket: Option, + /// location is the IBM Cloud location in which your bucket exists. Optional, will be set based on the installed IBM Cloud location. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub location: Option, + /// resourceGroupName is the name of the IBM Cloud resource group that this bucket and its service instance is associated with. Optional, will be set based on the installed IBM Cloud resource group. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceGroupName")] + pub resource_group_name: Option, + /// resourceKeyCRN is the CRN of the IBM Cloud resource key that is created for the service instance. Commonly referred as a service credential and must contain HMAC type credentials. Optional, will be computed if not provided. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceKeyCRN")] + pub resource_key_crn: Option, + /// serviceInstanceCRN is the CRN of the IBM Cloud Object Storage service instance that this bucket is associated with. Optional, will be computed if not provided. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceInstanceCRN")] + pub service_instance_crn: Option, +} + +/// Oss represents configuration that uses Alibaba Cloud Object Storage Service. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorageOss { + /// Bucket is the bucket name in which you want to store the registry's data. About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm) Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be autogenerated in the form of -image-registry-- + #[serde(default, skip_serializing_if = "Option::is_none")] + pub bucket: Option, + /// Encryption specifies whether you would like your data encrypted on the server side. More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm) + #[serde(default, skip_serializing_if = "Option::is_none")] + pub encryption: Option, + /// EndpointAccessibility specifies whether the registry use the OSS VPC internal endpoint Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `Internal`. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "endpointAccessibility")] + pub endpoint_accessibility: Option, + /// Region is the Alibaba Cloud Region in which your bucket exists. For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html). 
Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be based on the installed Alibaba Cloud Region. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub region: Option, +} + +/// Encryption specifies whether you would like your data encrypted on the server side. More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm) +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorageOssEncryption { + /// KMS (key management service) is an encryption type that holds the struct for KMS KeyID + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kms: Option, + /// Method defines the different encrytion modes available Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `AES256`. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub method: Option, +} + +/// KMS (key management service) is an encryption type that holds the struct for KMS KeyID +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorageOssEncryptionKms { + /// KeyID holds the KMS encryption key ID + #[serde(rename = "keyID")] + pub key_id: String, +} + +/// Encryption specifies whether you would like your data encrypted on the server side. More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm) +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConfigStatusStorageOssEncryptionMethod { + #[serde(rename = "KMS")] + Kms, + #[serde(rename = "AES256")] + Aes256, +} + +/// Oss represents configuration that uses Alibaba Cloud Object Storage Service. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConfigStatusStorageOssEndpointAccessibility { + Internal, + Public, + #[serde(rename = "")] + KopiumEmpty, +} + +/// pvc represents configuration that uses a PersistentVolumeClaim. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStoragePvc { + /// claim defines the Persisent Volume Claim's name to be used. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub claim: Option, +} + +/// s3 represents configuration that uses Amazon Simple Storage Service. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorageS3 { + /// bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub bucket: Option, + /// cloudFront configures Amazon Cloudfront as the storage middleware in a registry. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloudFront")] + pub cloud_front: Option, + /// encrypt specifies whether the registry stores the image in encrypted format or not. Optional, defaults to false. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub encrypt: Option, + /// keyID is the KMS key ID to use for encryption. Optional, Encrypt must be true, or this parameter is ignored. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "keyID")] + pub key_id: Option, + /// region is the AWS region in which your bucket exists. Optional, will be set based on the installed AWS Region. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub region: Option, + /// regionEndpoint is the endpoint for S3 compatible storage services. It should be a valid URL with scheme, e.g. https://s3.example.com. Optional, defaults based on the Region that is provided. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "regionEndpoint")] + pub region_endpoint: Option, + /// trustedCA is a reference to a config map containing a CA bundle. The image registry and its operator use certificates from this bundle to verify S3 server certificates. + /// The namespace for the config map referenced by trustedCA is "openshift-config". The key for the bundle in the config map is "ca-bundle.crt". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "trustedCA")] + pub trusted_ca: Option, + /// virtualHostedStyle enables using S3 virtual hosted style bucket paths with a custom RegionEndpoint Optional, defaults to false. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "virtualHostedStyle")] + pub virtual_hosted_style: Option, +} + +/// cloudFront configures Amazon Cloudfront as the storage middleware in a registry. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorageS3CloudFront { + /// baseURL contains the SCHEME://HOST[/PATH] at which Cloudfront is served. + #[serde(rename = "baseURL")] + pub base_url: String, + /// duration is the duration of the Cloudfront session. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub duration: Option, + /// keypairID is key pair ID provided by AWS. + #[serde(rename = "keypairID")] + pub keypair_id: String, + /// privateKey points to secret containing the private key, provided by AWS. + #[serde(rename = "privateKey")] + pub private_key: ConfigStatusStorageS3CloudFrontPrivateKey, +} + +/// privateKey points to secret containing the private key, provided by AWS. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorageS3CloudFrontPrivateKey { + /// The key of the secret to select from. Must be a valid secret key. + pub key: String, + /// Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Specify whether the Secret or its key must be defined + #[serde(default, skip_serializing_if = "Option::is_none")] + pub optional: Option, +} + +/// trustedCA is a reference to a config map containing a CA bundle. The image registry and its operator use certificates from this bundle to verify S3 server certificates. +/// The namespace for the config map referenced by trustedCA is "openshift-config". The key for the bundle in the config map is "ca-bundle.crt". +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorageS3TrustedCa { + /// name is the metadata.name of the referenced config map. This field must adhere to standard config map naming restrictions. The name must consist solely of alphanumeric characters, hyphens (-) and periods (.). It has a maximum length of 253 characters. If this field is not specified or is empty string, the default trust bundle will be used. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +/// swift represents configuration that uses OpenStack Object Storage. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusStorageSwift { + /// authURL defines the URL for obtaining an authentication token. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "authURL")] + pub auth_url: Option, + /// authVersion specifies the OpenStack Auth's version. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "authVersion")] + pub auth_version: Option, + /// container defines the name of Swift container where to store the registry's data. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub container: Option, + /// domain specifies Openstack's domain name for Identity v3 API. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub domain: Option, + /// domainID specifies Openstack's domain id for Identity v3 API. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "domainID")] + pub domain_id: Option, + /// regionName defines Openstack's region in which container exists. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "regionName")] + pub region_name: Option, + /// tenant defines Openstack tenant name to be used by registry. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tenant: Option, + /// tenant defines Openstack tenant id to be used by registry. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tenantID")] + pub tenant_id: Option, +} + diff --git a/kube-custom-resources-rs/src/imageregistry_operator_openshift_io/v1/imagepruners.rs b/kube-custom-resources-rs/src/imageregistry_operator_openshift_io/v1/imagepruners.rs new file mode 100644 index 000000000..fbf14f0f4 --- /dev/null +++ b/kube-custom-resources-rs/src/imageregistry_operator_openshift_io/v1/imagepruners.rs @@ -0,0 +1,543 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/imageregistry.operator.openshift.io/v1/imagepruners.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; +use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; + +/// ImagePrunerSpec defines the specs for the running image pruner. 
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "imageregistry.operator.openshift.io", version = "v1", kind = "ImagePruner", plural = "imagepruners")] +#[kube(status = "ImagePrunerStatus")] +#[kube(schema = "disabled")] +pub struct ImagePrunerSpec { + /// affinity is a group of node affinity scheduling rules for the image pruner pod. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub affinity: Option, + /// failedJobsHistoryLimit specifies how many failed image pruner jobs to retain. Defaults to 3 if not set. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failedJobsHistoryLimit")] + pub failed_jobs_history_limit: Option, + /// ignoreInvalidImageReferences indicates whether the pruner can ignore errors while parsing image references. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ignoreInvalidImageReferences")] + pub ignore_invalid_image_references: Option, + /// keepTagRevisions specifies the number of image revisions for a tag in an image stream that will be preserved. Defaults to 3. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "keepTagRevisions")] + pub keep_tag_revisions: Option, + /// keepYoungerThan specifies the minimum age in nanoseconds of an image and its referrers for it to be considered a candidate for pruning. DEPRECATED: This field is deprecated in favor of keepYoungerThanDuration. If both are set, this field is ignored and keepYoungerThanDuration takes precedence. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "keepYoungerThan")] + pub keep_younger_than: Option, + /// keepYoungerThanDuration specifies the minimum age of an image and its referrers for it to be considered a candidate for pruning. Defaults to 60m (60 minutes). 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "keepYoungerThanDuration")] + pub keep_younger_than_duration: Option, + /// logLevel sets the level of log output for the pruner job. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// nodeSelector defines the node selection constraints for the image pruner pod. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeSelector")] + pub node_selector: Option>, + /// resources defines the resource requests and limits for the image pruner pod. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resources: Option, + /// schedule specifies when to execute the job using standard cronjob syntax: https://wikipedia.org/wiki/Cron. Defaults to `0 0 * * *`. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub schedule: Option, + /// successfulJobsHistoryLimit specifies how many successful image pruner jobs to retain. Defaults to 3 if not set. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "successfulJobsHistoryLimit")] + pub successful_jobs_history_limit: Option, + /// suspend specifies whether or not to suspend subsequent executions of this cronjob. Defaults to false. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub suspend: Option, + /// tolerations defines the node tolerations for the image pruner pod. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, +} + +/// affinity is a group of node affinity scheduling rules for the image pruner pod. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinity { + /// Describes node affinity scheduling rules for the pod. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeAffinity")] + pub node_affinity: Option, + /// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "podAffinity")] + pub pod_affinity: Option, + /// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "podAntiAffinity")] + pub pod_anti_affinity: Option, +} + +/// Describes node affinity scheduling rules for the pod. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityNodeAffinity { + /// The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] + pub preferred_during_scheduling_ignored_during_execution: Option>, + /// If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] + pub required_during_scheduling_ignored_during_execution: Option, +} + +/// An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution { + /// A node selector term, associated with the corresponding weight. + pub preference: ImagePrunerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference, + /// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + pub weight: i32, +} + +/// A node selector term, associated with the corresponding weight. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference { + /// A list of node selector requirements by node's labels. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// A list of node selector requirements by node's fields. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchFields")] + pub match_fields: Option>, +} + +/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions { + /// The label key that the selector applies to. + pub key: String, + /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + pub operator: String, + /// An array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields { + /// The label key that the selector applies to. + pub key: String, + /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + pub operator: String, + /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution { + /// Required. A list of node selector terms. The terms are ORed. 
+ #[serde(rename = "nodeSelectorTerms")] + pub node_selector_terms: Vec, +} + +/// A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms { + /// A list of node selector requirements by node's labels. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// A list of node selector requirements by node's fields. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchFields")] + pub match_fields: Option>, +} + +/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions { + /// The label key that the selector applies to. + pub key: String, + /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + pub operator: String, + /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields { + /// The label key that the selector applies to. + pub key: String, + /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + pub operator: String, + /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAffinity { + /// The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] + pub preferred_during_scheduling_ignored_during_execution: Option>, + /// If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] + pub required_during_scheduling_ignored_during_execution: Option>, +} + +/// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution { + /// Required. A pod affinity term, associated with the corresponding weight. + #[serde(rename = "podAffinityTerm")] + pub pod_affinity_term: ImagePrunerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm, + /// weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + pub weight: i32, +} + +/// Required. A pod affinity term, associated with the corresponding weight. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm { + /// A label query over a set of resources, in this case pods. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespaces: Option>, + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + #[serde(rename = "topologyKey")] + pub topology_key: String, +} + +/// A label query over a set of resources, in this case pods. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution { + /// A label query over a set of resources, in this case pods. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + /// A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespaces: Option>, + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + #[serde(rename = "topologyKey")] + pub topology_key: String, +} + +/// A label query over a set of resources, in this case pods. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAntiAffinity { + /// The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] + pub preferred_during_scheduling_ignored_during_execution: Option>, + /// If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] + pub required_during_scheduling_ignored_during_execution: Option>, +} + +/// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution { + /// Required. A pod affinity term, associated with the corresponding weight. + #[serde(rename = "podAffinityTerm")] + pub pod_affinity_term: ImagePrunerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm, + /// weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + pub weight: i32, +} + +/// Required. A pod affinity term, associated with the corresponding weight. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm { + /// A label query over a set of resources, in this case pods. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespaces: Option>, + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + #[serde(rename = "topologyKey")] + pub topology_key: String, +} + +/// A label query over a set of resources, in this case pods. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution { + /// A label query over a set of resources, in this case pods. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + /// A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespaces: Option>, + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + #[serde(rename = "topologyKey")] + pub topology_key: String, +} + +/// A label query over a set of resources, in this case pods. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// ImagePrunerSpec defines the specs for the running image pruner. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ImagePrunerLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// resources defines the resource requests and limits for the image pruner pod. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerResources { + /// Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. + /// This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. + /// This field is immutable. It can only be set for containers. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub claims: Option>, + /// Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub limits: Option>, + /// Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub requests: Option>, +} + +/// ResourceClaim references one entry in PodSpec.ResourceClaims. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerResourcesClaims { + /// Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + pub name: String, +} + +/// The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerTolerations { + /// Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub effect: Option, + /// Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + /// Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + /// TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tolerationSeconds")] + pub toleration_seconds: Option, + /// Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +/// ImagePrunerStatus reports image pruner operational status. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerStatus { + /// conditions is a list of conditions and their status. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// observedGeneration is the last generation change that has been applied. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, +} + +/// OperatorCondition is just the standard condition fields. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ImagePrunerStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + diff --git a/kube-custom-resources-rs/src/imageregistry_operator_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/imageregistry_operator_openshift_io/v1/mod.rs new file mode 100644 index 000000000..b2f79d9fd --- /dev/null +++ b/kube-custom-resources-rs/src/imageregistry_operator_openshift_io/v1/mod.rs @@ -0,0 +1,2 @@ +pub mod configs; +pub mod imagepruners; diff --git a/kube-custom-resources-rs/src/ingress_operator_openshift_io/mod.rs b/kube-custom-resources-rs/src/ingress_operator_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/ingress_operator_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/ingress_operator_openshift_io/v1/dnsrecords.rs b/kube-custom-resources-rs/src/ingress_operator_openshift_io/v1/dnsrecords.rs new file mode 100644 index 000000000..68d749908 --- /dev/null +++ b/kube-custom-resources-rs/src/ingress_operator_openshift_io/v1/dnsrecords.rs @@ -0,0 +1,99 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/ingress.operator.openshift.io/v1/dnsrecords.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec is the specification of the desired behavior of the 
dnsRecord. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "ingress.operator.openshift.io", version = "v1", kind = "DNSRecord", plural = "dnsrecords")] +#[kube(namespaced)] +#[kube(status = "DNSRecordStatus")] +#[kube(schema = "disabled")] +pub struct DNSRecordSpec { + /// dnsManagementPolicy denotes the current policy applied on the DNS record. Records that have policy set as "Unmanaged" are ignored by the ingress operator. This means that the DNS record on the cloud provider is not managed by the operator, and the "Published" status condition will be updated to "Unknown" status, since it is externally managed. Any existing record on the cloud provider can be deleted at the discretion of the cluster admin. + /// This field defaults to Managed. Valid values are "Managed" and "Unmanaged". + #[serde(rename = "dnsManagementPolicy")] + pub dns_management_policy: DNSRecordDnsManagementPolicy, + /// dnsName is the hostname of the DNS record + #[serde(rename = "dnsName")] + pub dns_name: String, + /// recordTTL is the record TTL in seconds. If zero, the default is 30. RecordTTL will not be used in AWS regions Alias targets, but will be used in CNAME targets, per AWS API contract. + #[serde(rename = "recordTTL")] + pub record_ttl: i64, + /// recordType is the DNS record type. For example, "A" or "CNAME". + #[serde(rename = "recordType")] + pub record_type: DNSRecordRecordType, + /// targets are record targets. + pub targets: Vec, +} + +/// spec is the specification of the desired behavior of the dnsRecord. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DNSRecordDnsManagementPolicy { + Managed, + Unmanaged, +} + +/// spec is the specification of the desired behavior of the dnsRecord. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DNSRecordRecordType { + #[serde(rename = "CNAME")] + Cname, + A, +} + +/// status is the most recently observed status of the dnsRecord. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSRecordStatus { + /// observedGeneration is the most recently observed generation of the DNSRecord. When the DNSRecord is updated, the controller updates the corresponding record in each managed zone. If an update for a particular zone fails, that failure is recorded in the status condition for the zone so that the controller can determine that it needs to retry the update for that specific zone. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// zones are the status of the record in each zone. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub zones: Option>, +} + +/// DNSZoneStatus is the status of a record within a specific zone. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSRecordStatusZones { + /// conditions are any conditions associated with the record in the zone. + /// If publishing the record succeeds, the "Published" condition will be set with status "True" and upon failure it will be set to "False" along with the reason and message describing the cause of the failure. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// dnsZone is the zone where the record is published. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "dnsZone")] + pub dns_zone: Option, +} + +/// DNSZoneCondition is just the standard condition fields. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSRecordStatusZonesConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + pub status: String, + #[serde(rename = "type")] + pub r#type: String, +} + +/// dnsZone is the zone where the record is published. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSRecordStatusZonesDnsZone { + /// id is the identifier that can be used to find the DNS hosted zone. + /// on AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3]. + /// [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get + #[serde(default, skip_serializing_if = "Option::is_none")] + pub id: Option, + /// tags can be used to query the DNS hosted zone. 
+ /// on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, + /// [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tags: Option>, +} + diff --git a/kube-custom-resources-rs/src/ingress_operator_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/ingress_operator_openshift_io/v1/mod.rs new file mode 100644 index 000000000..584f9ea3b --- /dev/null +++ b/kube-custom-resources-rs/src/ingress_operator_openshift_io/v1/mod.rs @@ -0,0 +1 @@ +pub mod dnsrecords; diff --git a/kube-custom-resources-rs/src/insights_openshift_io/mod.rs b/kube-custom-resources-rs/src/insights_openshift_io/mod.rs new file mode 100644 index 000000000..32a5a9d4f --- /dev/null +++ b/kube-custom-resources-rs/src/insights_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1alpha1; diff --git a/kube-custom-resources-rs/src/insights_openshift_io/v1alpha1/datagathers.rs b/kube-custom-resources-rs/src/insights_openshift_io/v1alpha1/datagathers.rs new file mode 100644 index 000000000..727756d19 --- /dev/null +++ b/kube-custom-resources-rs/src/insights_openshift_io/v1alpha1/datagathers.rs @@ -0,0 +1,213 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/insights.openshift.io/v1alpha1/datagathers.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "insights.openshift.io", version = "v1alpha1", kind = "DataGather", plural = "datagathers")] +#[kube(status = "DataGatherStatus")] +#[kube(schema = "disabled")] +pub struct DataGatherSpec { + /// dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain in the 
Insights archive data. Valid values are "ClearText" and "ObfuscateNetworking". When set to ClearText the data is not obfuscated. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is ClearText. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataPolicy")] + pub data_policy: Option, + /// gatherers is a list of gatherers configurations. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gatherers: Option>, +} + +/// spec holds user settable values for configuration +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DataGatherDataPolicy { + #[serde(rename = "")] + KopiumEmpty, + ClearText, + ObfuscateNetworking, +} + +/// gathererConfig allows to configure specific gatherers +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DataGatherGatherers { + /// name is the name of specific gatherer + pub name: String, + /// state allows you to configure specific gatherer. Valid values are "Enabled", "Disabled" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default. The current default is Enabled. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub state: Option, +} + +/// gathererConfig allows to configure specific gatherers +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DataGatherGatherersState { + #[serde(rename = "")] + KopiumEmpty, + Enabled, + Disabled, +} + +/// status holds observed values from the cluster. 
They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DataGatherStatus { + /// conditions provide details on the status of the gatherer job. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// dataGatherState reflects the current state of the data gathering process. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataGatherState")] + pub data_gather_state: Option, + /// finishTime is the time when Insights data gathering finished. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "finishTime")] + pub finish_time: Option, + /// gatherers is a list of active gatherers (and their statuses) in the last gathering. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gatherers: Option>, + /// insightsReport provides general Insights analysis results. When omitted, this means no data gathering has taken place yet or the corresponding Insights analysis (identified by "insightsRequestID") is not available. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "insightsReport")] + pub insights_report: Option, + /// insightsRequestID is an Insights request ID to track the status of the Insights analysis (in console.redhat.com processing pipeline) for the corresponding Insights data archive. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "insightsRequestID")] + pub insights_request_id: Option, + /// relatedObjects is a list of resources which are useful when debugging or inspecting the data gathering Pod + #[serde(default, skip_serializing_if = "Option::is_none", rename = "relatedObjects")] + pub related_objects: Option>, + /// startTime is the time when Insights data gathering started. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "startTime")] + pub start_time: Option, +} + +/// Condition contains details for one aspect of the current state of this API Resource. 
--- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DataGatherStatusConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. + pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + pub reason: String, + /// status of the condition, one of True, False, Unknown. 
+ pub status: DataGatherStatusConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DataGatherStatusConditionsStatus { + True, + False, + Unknown, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DataGatherStatusDataGatherState { + Running, + Completed, + Failed, + Pending, +} + +/// gathererStatus represents information about a particular data gatherer. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DataGatherStatusGatherers { + /// conditions provide details on the status of each gatherer. + pub conditions: Vec, + /// lastGatherDuration represents the time spent gathering. + #[serde(rename = "lastGatherDuration")] + pub last_gather_duration: String, + /// name is the name of the gatherer. + pub name: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. 
--- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DataGatherStatusGatherersConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. + pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + pub reason: String, + /// status of the condition, one of True, False, Unknown. 
+ pub status: DataGatherStatusGatherersConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DataGatherStatusGatherersConditionsStatus { + True, + False, + Unknown, +} + +/// insightsReport provides general Insights analysis results. When omitted, this means no data gathering has taken place yet or the corresponding Insights analysis (identified by "insightsRequestID") is not available. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DataGatherStatusInsightsReport { + /// downloadedAt is the time when the last Insights report was downloaded. An empty value means that there has not been any Insights report downloaded yet and it usually appears in disconnected clusters (or clusters when the Insights data gathering is disabled). 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "downloadedAt")] + pub downloaded_at: Option, + /// healthChecks provides basic information about active Insights health checks in a cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "healthChecks")] + pub health_checks: Option>, + /// uri provides the URL link from which the report was downloaded. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uri: Option, +} + +/// healthCheck represents an Insights health check attributes. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DataGatherStatusInsightsReportHealthChecks { + /// advisorURI provides the URL link to the Insights Advisor. + #[serde(rename = "advisorURI")] + pub advisor_uri: String, + /// description provides basic description of the healtcheck. + pub description: String, + /// state determines what the current state of the health check is. Health check is enabled by default and can be disabled by the user in the Insights advisor user interface. + pub state: DataGatherStatusInsightsReportHealthChecksState, + /// totalRisk of the healthcheck. Indicator of the total risk posed by the detected issue; combination of impact and likelihood. The values can be from 1 to 4, and the higher the number, the more important the issue. + #[serde(rename = "totalRisk")] + pub total_risk: i32, +} + +/// healthCheck represents an Insights health check attributes. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DataGatherStatusInsightsReportHealthChecksState { + Enabled, + Disabled, +} + +/// ObjectReference contains enough information to let you inspect or modify the referred object. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DataGatherStatusRelatedObjects { + /// group is the API Group of the Resource. Enter empty string for the core group. This value should consist of only lowercase alphanumeric characters, hyphens and periods. 
Example: "", "apps", "build.openshift.io", etc. + pub group: String, + /// name of the referent. + pub name: String, + /// namespace of the referent. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the type that is being referenced. It is normally the plural form of the resource kind in lowercase. This value should consist of only lowercase alphanumeric characters and hyphens. Example: "deployments", "deploymentconfigs", "pods", etc. + pub resource: String, +} + diff --git a/kube-custom-resources-rs/src/insights_openshift_io/v1alpha1/mod.rs b/kube-custom-resources-rs/src/insights_openshift_io/v1alpha1/mod.rs new file mode 100644 index 000000000..e21d01d9d --- /dev/null +++ b/kube-custom-resources-rs/src/insights_openshift_io/v1alpha1/mod.rs @@ -0,0 +1 @@ +pub mod datagathers; diff --git a/kube-custom-resources-rs/src/lib.rs b/kube-custom-resources-rs/src/lib.rs index 7af70dc72..ed19f5acd 100644 --- a/kube-custom-resources-rs/src/lib.rs +++ b/kube-custom-resources-rs/src/lib.rs @@ -295,6 +295,12 @@ Every group has its own feature in this crate. The available features are as fol - kinds: - `AwsAuthSyncConfig` +## authorization_openshift_io + +- apiVersion: `authorization.openshift.io/v1` +- kinds: + - `RoleBindingRestriction` + ## authzed_com - apiVersion: `authzed.com/v1alpha1` @@ -568,6 +574,12 @@ Every group has its own feature in this crate. The available features are as fol - `CiliumNodeConfig` - `CiliumPodIPPool` +## cloud_network_openshift_io + +- apiVersion: `cloud.network.openshift.io/v1` +- kinds: + - `CloudPrivateIPConfig` + ## cloudformation_linki_space - apiVersion: `cloudformation.linki.space/v1alpha1` @@ -642,6 +654,49 @@ Every group has its own feature in this crate. 
The available features are as fol - kinds: - `ClusterColocationProfile` +## config_openshift_io + +- apiVersion: `config.openshift.io/v1` +- kinds: + - `APIServer` + - `Authentication` + - `ClusterOperator` + - `ClusterVersion` + - `Console` + - `DNS` + - `FeatureGate` + - `ImageDigestMirrorSet` + - `Image` + - `ImageTagMirrorSet` + - `Infrastructure` + - `Ingress` + - `Network` + - `Node` + - `OAuth` + - `OperatorHub` + - `Project` + - `Proxy` + - `Scheduler` + +## console_openshift_io + +- apiVersion: `console.openshift.io/v1` +- kinds: + - `ConsolePlugin` + - `ConsoleQuickStart` + - `ConsoleSample` + - `ConsoleYAMLSample` + +- apiVersion: `console.openshift.io/v1alpha1` +- kinds: + - `ConsolePlugin` + +## controlplane_operator_openshift_io + +- apiVersion: `controlplane.operator.openshift.io/v1alpha1` +- kinds: + - `PodNetworkConnectivityCheck` + ## core_linuxsuren_github_com - apiVersion: `core.linuxsuren.github.com/v1alpha1` @@ -842,6 +897,12 @@ Every group has its own feature in this crate. The available features are as fol - kinds: - `EnterpriseSearch` +## example_openshift_io + +- apiVersion: `example.openshift.io/v1` +- kinds: + - `StableConfigType` + ## execution_furiko_io - apiVersion: `execution.furiko.io/v1alpha1` @@ -1021,6 +1082,13 @@ Every group has its own feature in this crate. The available features are as fol - `Map` - `WanReplication` +## helm_openshift_io + +- apiVersion: `helm.openshift.io/v1beta1` +- kinds: + - `HelmChartRepository` + - `ProjectHelmChartRepository` + ## helm_toolkit_fluxcd_io - apiVersion: `helm.toolkit.fluxcd.io/v2beta1` @@ -1106,6 +1174,13 @@ Every group has its own feature in this crate. 
The available features are as fol - `ImagePolicy` - `ImageRepository` +## imageregistry_operator_openshift_io + +- apiVersion: `imageregistry.operator.openshift.io/v1` +- kinds: + - `Config` + - `ImagePruner` + ## imaging_ingestion_alvearie_org - apiVersion: `imaging-ingestion.alvearie.org/v1alpha1` @@ -1172,6 +1247,18 @@ Every group has its own feature in this crate. The available features are as fol - `IBMVPCMachine` - `IBMVPCMachineTemplate` +## ingress_operator_openshift_io + +- apiVersion: `ingress.operator.openshift.io/v1` +- kinds: + - `DNSRecord` + +## insights_openshift_io + +- apiVersion: `insights.openshift.io/v1alpha1` +- kinds: + - `DataGather` + ## installation_mattermost_com - apiVersion: `installation.mattermost.com/v1beta1` @@ -1521,6 +1608,18 @@ Every group has its own feature in this crate. The available features are as fol - `VolumeAttachment` - `Volume` +## machine_openshift_io + +- apiVersion: `machine.openshift.io/v1` +- kinds: + - `ControlPlaneMachineSet` + +- apiVersion: `machine.openshift.io/v1beta1` +- kinds: + - `MachineHealthCheck` + - `Machine` + - `MachineSet` + ## machineconfiguration_openshift_io - apiVersion: `machineconfiguration.openshift.io/v1` @@ -1624,6 +1723,13 @@ Every group has its own feature in this crate. The available features are as fol - kinds: - `AlertmanagerConfig` +## monitoring_openshift_io + +- apiVersion: `monitoring.openshift.io/v1` +- kinds: + - `AlertingRule` + - `AlertRelabelConfig` + ## monocle_monocle_change_metrics_io - apiVersion: `monocle.monocle.change-metrics.io/v1alpha1` @@ -1689,6 +1795,21 @@ Every group has its own feature in this crate. 
The available features are as fol - kinds: - `RawDevice` +## network_openshift_io + +- apiVersion: `network.openshift.io/v1` +- kinds: + - `ClusterNetwork` + - `EgressNetworkPolicy` + - `HostSubnet` + - `NetNamespace` + +## network_operator_openshift_io + +- apiVersion: `network.operator.openshift.io/v1` +- kinds: + - `EgressRouter` + ## networking_karmada_io - apiVersion: `networking.karmada.io/v1alpha1` @@ -1810,6 +1931,31 @@ Every group has its own feature in this crate. The available features are as fol - `ClusterManager` - `Klusterlet` +## operator_openshift_io + +- apiVersion: `operator.openshift.io/v1` +- kinds: + - `Authentication` + - `CloudCredential` + - `ClusterCSIDriver` + - `Config` + - `Console` + - `CSISnapshotController` + - `DNS` + - `Etcd` + - `IngressController` + - `InsightsOperator` + - `KubeAPIServer` + - `KubeControllerManager` + - `KubeScheduler` + - `KubeStorageVersionMigrator` + - `MachineConfiguration` + - `Network` + - `OpenShiftAPIServer` + - `OpenShiftControllerManager` + - `ServiceCA` + - `Storage` + ## operator_shipwright_io - apiVersion: `operator.shipwright.io/v1alpha1` @@ -1858,6 +2004,12 @@ Every group has its own feature in this crate. The available features are as fol - kinds: - `Lock` +## platform_openshift_io + +- apiVersion: `platform.openshift.io/v1alpha1` +- kinds: + - `PlatformOperator` + ## policy_clusterpedia_io - apiVersion: `policy.clusterpedia.io/v1alpha1` @@ -1905,6 +2057,12 @@ Every group has its own feature in this crate. The available features are as fol - kinds: - `QuayRegistry` +## quota_openshift_io + +- apiVersion: `quota.openshift.io/v1` +- kinds: + - `ClusterResourceQuota` + ## ray_io - apiVersion: `ray.io/v1` @@ -1985,6 +2143,12 @@ Every group has its own feature in this crate. 
The available features are as fol - `NameService` - `TopicTransfer` +## route_openshift_io + +- apiVersion: `route.openshift.io/v1` +- kinds: + - `Route` + ## rules_kubeedge_io - apiVersion: `rules.kubeedge.io/v1` @@ -2029,6 +2193,12 @@ Every group has its own feature in this crate. The available features are as fol - `TransformJob` - `UserProfile` +## samples_operator_openshift_io + +- apiVersion: `samples.operator.openshift.io/v1` +- kinds: + - `Config` + ## scheduling_koordinator_sh - apiVersion: `scheduling.koordinator.sh/v1alpha1` @@ -2120,6 +2290,18 @@ Every group has its own feature in this crate. The available features are as fol - kinds: - `SeccompProfile` +## security_internal_openshift_io + +- apiVersion: `security.internal.openshift.io/v1` +- kinds: + - `RangeAllocation` + +## security_openshift_io + +- apiVersion: `security.openshift.io/v1` +- kinds: + - `SecurityContextConstraints` + ## servicebinding_io - apiVersion: `servicebinding.io/v1alpha3` @@ -2152,6 +2334,13 @@ Every group has its own feature in this crate. 
The available features are as fol - `Activity` - `StateMachine` +## sharedresource_openshift_io + +- apiVersion: `sharedresource.openshift.io/v1alpha1` +- kinds: + - `SharedConfigMap` + - `SharedSecret` + ## site_superedge_io - apiVersion: `site.superedge.io/v1alpha1` @@ -2434,6 +2623,8 @@ pub mod asdb_aerospike_com; pub mod atlasmap_io; #[cfg(feature = "auth_ops42_org")] pub mod auth_ops42_org; +#[cfg(feature = "authorization_openshift_io")] +pub mod authorization_openshift_io; #[cfg(feature = "authzed_com")] pub mod authzed_com; #[cfg(feature = "autoscaling_k8s_io")] @@ -2478,6 +2669,8 @@ pub mod chaosblade_io; pub mod che_eclipse_org; #[cfg(feature = "cilium_io")] pub mod cilium_io; +#[cfg(feature = "cloud_network_openshift_io")] +pub mod cloud_network_openshift_io; #[cfg(feature = "cloudformation_linki_space")] pub mod cloudformation_linki_space; #[cfg(feature = "cluster_clusterpedia_io")] @@ -2496,6 +2689,12 @@ pub mod config_grafana_com; pub mod config_karmada_io; #[cfg(feature = "config_koordinator_sh")] pub mod config_koordinator_sh; +#[cfg(feature = "config_openshift_io")] +pub mod config_openshift_io; +#[cfg(feature = "console_openshift_io")] +pub mod console_openshift_io; +#[cfg(feature = "controlplane_operator_openshift_io")] +pub mod controlplane_operator_openshift_io; #[cfg(feature = "core_linuxsuren_github_com")] pub mod core_linuxsuren_github_com; #[cfg(feature = "core_openfeature_dev")] @@ -2534,6 +2733,8 @@ pub mod elbv2_k8s_aws; pub mod emrcontainers_services_k8s_aws; #[cfg(feature = "enterprisesearch_k8s_elastic_co")] pub mod enterprisesearch_k8s_elastic_co; +#[cfg(feature = "example_openshift_io")] +pub mod example_openshift_io; #[cfg(feature = "execution_furiko_io")] pub mod execution_furiko_io; #[cfg(feature = "executor_testkube_io")] @@ -2572,6 +2773,8 @@ pub mod gitops_hybrid_cloud_patterns_io; pub mod grafana_integreatly_org; #[cfg(feature = "hazelcast_com")] pub mod hazelcast_com; +#[cfg(feature = "helm_openshift_io")] +pub mod 
helm_openshift_io; #[cfg(feature = "helm_toolkit_fluxcd_io")] pub mod helm_toolkit_fluxcd_io; #[cfg(feature = "hive_openshift_io")] @@ -2588,6 +2791,8 @@ pub mod iam_services_k8s_aws; pub mod ibmcloud_ibm_com; #[cfg(feature = "image_toolkit_fluxcd_io")] pub mod image_toolkit_fluxcd_io; +#[cfg(feature = "imageregistry_operator_openshift_io")] +pub mod imageregistry_operator_openshift_io; #[cfg(feature = "imaging_ingestion_alvearie_org")] pub mod imaging_ingestion_alvearie_org; #[cfg(feature = "inference_kubedl_io")] @@ -2596,6 +2801,10 @@ pub mod inference_kubedl_io; pub mod infinispan_org; #[cfg(feature = "infrastructure_cluster_x_k8s_io")] pub mod infrastructure_cluster_x_k8s_io; +#[cfg(feature = "ingress_operator_openshift_io")] +pub mod ingress_operator_openshift_io; +#[cfg(feature = "insights_openshift_io")] +pub mod insights_openshift_io; #[cfg(feature = "installation_mattermost_com")] pub mod installation_mattermost_com; #[cfg(feature = "integration_rock8s_com")] @@ -2658,6 +2867,8 @@ pub mod logging_banzaicloud_io; pub mod loki_grafana_com; #[cfg(feature = "longhorn_io")] pub mod longhorn_io; +#[cfg(feature = "machine_openshift_io")] +pub mod machine_openshift_io; #[cfg(feature = "machineconfiguration_openshift_io")] pub mod machineconfiguration_openshift_io; #[cfg(feature = "maps_k8s_elastic_co")] @@ -2678,6 +2889,8 @@ pub mod mirrors_kts_studio; pub mod model_kubedl_io; #[cfg(feature = "monitoring_coreos_com")] pub mod monitoring_coreos_com; +#[cfg(feature = "monitoring_openshift_io")] +pub mod monitoring_openshift_io; #[cfg(feature = "monocle_monocle_change_metrics_io")] pub mod monocle_monocle_change_metrics_io; #[cfg(feature = "mq_services_k8s_aws")] @@ -2690,6 +2903,10 @@ pub mod multicluster_x_k8s_io; pub mod mutations_gatekeeper_sh; #[cfg(feature = "nativestor_alauda_io")] pub mod nativestor_alauda_io; +#[cfg(feature = "network_openshift_io")] +pub mod network_openshift_io; +#[cfg(feature = "network_operator_openshift_io")] +pub mod 
network_operator_openshift_io; #[cfg(feature = "networking_karmada_io")] pub mod networking_karmada_io; #[cfg(feature = "nfd_k8s_sigs_io")] @@ -2718,6 +2935,8 @@ pub mod operator_cluster_x_k8s_io; pub mod operator_cryostat_io; #[cfg(feature = "operator_open_cluster_management_io")] pub mod operator_open_cluster_management_io; +#[cfg(feature = "operator_openshift_io")] +pub mod operator_openshift_io; #[cfg(feature = "operator_shipwright_io")] pub mod operator_shipwright_io; #[cfg(feature = "operator_tigera_io")] @@ -2728,6 +2947,8 @@ pub mod operator_victoriametrics_com; pub mod org_eclipse_che; #[cfg(feature = "pkg_crossplane_io")] pub mod pkg_crossplane_io; +#[cfg(feature = "platform_openshift_io")] +pub mod platform_openshift_io; #[cfg(feature = "policy_clusterpedia_io")] pub mod policy_clusterpedia_io; #[cfg(feature = "policy_karmada_io")] @@ -2740,6 +2961,8 @@ pub mod postgresql_cnpg_io; pub mod prometheusservice_services_k8s_aws; #[cfg(feature = "quay_redhat_com")] pub mod quay_redhat_com; +#[cfg(feature = "quota_openshift_io")] +pub mod quota_openshift_io; #[cfg(feature = "ray_io")] pub mod ray_io; #[cfg(feature = "rds_services_k8s_aws")] @@ -2756,6 +2979,8 @@ pub mod repo_manager_pulpproject_org; pub mod resources_teleport_dev; #[cfg(feature = "rocketmq_apache_org")] pub mod rocketmq_apache_org; +#[cfg(feature = "route_openshift_io")] +pub mod route_openshift_io; #[cfg(feature = "rules_kubeedge_io")] pub mod rules_kubeedge_io; #[cfg(feature = "runtime_cluster_x_k8s_io")] @@ -2764,6 +2989,8 @@ pub mod runtime_cluster_x_k8s_io; pub mod s3_services_k8s_aws; #[cfg(feature = "sagemaker_services_k8s_aws")] pub mod sagemaker_services_k8s_aws; +#[cfg(feature = "samples_operator_openshift_io")] +pub mod samples_operator_openshift_io; #[cfg(feature = "scheduling_koordinator_sh")] pub mod scheduling_koordinator_sh; #[cfg(feature = "scheduling_sigs_k8s_io")] @@ -2784,6 +3011,10 @@ pub mod secrets_hashicorp_com; pub mod secscan_quay_redhat_com; #[cfg(feature = 
"security_profiles_operator_x_k8s_io")] pub mod security_profiles_operator_x_k8s_io; +#[cfg(feature = "security_internal_openshift_io")] +pub mod security_internal_openshift_io; +#[cfg(feature = "security_openshift_io")] +pub mod security_openshift_io; #[cfg(feature = "servicebinding_io")] pub mod servicebinding_io; #[cfg(feature = "services_k8s_aws")] @@ -2792,6 +3023,8 @@ pub mod services_k8s_aws; pub mod serving_kubedl_io; #[cfg(feature = "sfn_services_k8s_aws")] pub mod sfn_services_k8s_aws; +#[cfg(feature = "sharedresource_openshift_io")] +pub mod sharedresource_openshift_io; #[cfg(feature = "site_superedge_io")] pub mod site_superedge_io; #[cfg(feature = "slo_koordinator_sh")] diff --git a/kube-custom-resources-rs/src/machine_openshift_io/mod.rs b/kube-custom-resources-rs/src/machine_openshift_io/mod.rs new file mode 100644 index 000000000..517ec8539 --- /dev/null +++ b/kube-custom-resources-rs/src/machine_openshift_io/mod.rs @@ -0,0 +1,2 @@ +pub mod v1; +pub mod v1beta1; diff --git a/kube-custom-resources-rs/src/machine_openshift_io/v1/controlplanemachinesets.rs b/kube-custom-resources-rs/src/machine_openshift_io/v1/controlplanemachinesets.rs new file mode 100644 index 000000000..0601323c4 --- /dev/null +++ b/kube-custom-resources-rs/src/machine_openshift_io/v1/controlplanemachinesets.rs @@ -0,0 +1,448 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/machine.openshift.io/v1/controlplanemachinesets.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet. 
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "machine.openshift.io", version = "v1", kind = "ControlPlaneMachineSet", plural = "controlplanemachinesets")] +#[kube(namespaced)] +#[kube(status = "ControlPlaneMachineSetStatus")] +#[kube(schema = "disabled")] +pub struct ControlPlaneMachineSetSpec { + /// Replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes, 3 and 5 are the only valid values for this field. + pub replicas: i32, + /// Label selector for Machines. Existing Machines selected by this selector will be the ones affected by this ControlPlaneMachineSet. It must match the template's labels. This field is considered immutable after creation of the resource. + pub selector: ControlPlaneMachineSetSelector, + /// State defines whether the ControlPlaneMachineSet is Active or Inactive. When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent further action please remove the ControlPlaneMachineSet. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub state: Option, + /// Strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub strategy: Option, + /// Template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet. + pub template: ControlPlaneMachineSetTemplate, +} + +/// ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ControlPlaneMachineSetReplicas { + #[serde(rename = "3")] + r#_3, + #[serde(rename = "5")] + r#_5, +} + +/// Label selector for Machines. Existing Machines selected by this selector will be the ones affected by this ControlPlaneMachineSet. It must match the template's labels. This field is considered immutable after creation of the resource. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ControlPlaneMachineSetState { + Active, + Inactive, +} + +/// Strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetStrategy { + /// Type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are "RollingUpdate" and "OnDelete". The current default value is "RollingUpdate". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// Strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ControlPlaneMachineSetStrategyType { + RollingUpdate, + OnDelete, +} + +/// Template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplate { + /// MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. Currently, the only valid value is machines_v1beta1_machine_openshift_io. + #[serde(rename = "machineType")] + pub machine_type: ControlPlaneMachineSetTemplateMachineType, + /// OpenShiftMachineV1Beta1Machine defines the template for creating Machines from the v1beta1.machine.openshift.io API group. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub machines_v1beta1_machine_openshift_io: Option, +} + +/// Template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ControlPlaneMachineSetTemplateMachineType { + #[serde(rename = "machines_v1beta1_machine_openshift_io")] + MachinesV1beta1MachineOpenshiftIo, +} + +/// OpenShiftMachineV1Beta1Machine defines the template for creating Machines from the v1beta1.machine.openshift.io API group. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIo { + /// FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomains")] + pub failure_domains: Option, + /// ObjectMeta is the standard object metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector. + pub metadata: ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoMetadata, + /// Spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field. + pub spec: ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoSpec, +} + +/// FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoFailureDomains { + /// AWS configures failure domain information for the AWS platform. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub aws: Option>, + /// Azure configures failure domain information for the Azure platform. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub azure: Option>, + /// GCP configures failure domain information for the GCP platform. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gcp: Option>, + /// nutanix configures failure domain information for the Nutanix platform. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub nutanix: Option>, + /// OpenStack configures failure domain information for the OpenStack platform. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub openstack: Option>, + /// Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix. + pub platform: ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoFailureDomainsPlatform, +} + +/// AWSFailureDomain configures failure domain information for the AWS platform. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoFailureDomainsAws { + /// Placement configures the placement information for this instance. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub placement: Option, + /// Subnet is a reference to the subnet to use for this instance. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub subnet: Option, +} + +/// Placement configures the placement information for this instance. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoFailureDomainsAwsPlacement { + /// AvailabilityZone is the availability zone of the instance. + #[serde(rename = "availabilityZone")] + pub availability_zone: String, +} + +/// Subnet is a reference to the subnet to use for this instance. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoFailureDomainsAwsSubnet { + /// ARN of resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub arn: Option, + /// Filters is a set of filters used to identify a resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub filters: Option>, + /// ID of resource. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub id: Option, + /// Type determines how the reference will fetch the AWS resource. + #[serde(rename = "type")] + pub r#type: ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoFailureDomainsAwsSubnetType, +} + +/// AWSResourceFilter is a filter used to identify an AWS resource +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoFailureDomainsAwsSubnetFilters { + /// Name of the filter. Filter names are case-sensitive. + pub name: String, + /// Values includes one or more filter values. Filter values are case-sensitive. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// Subnet is a reference to the subnet to use for this instance. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoFailureDomainsAwsSubnetType { + #[serde(rename = "ID")] + Id, + #[serde(rename = "ARN")] + Arn, + Filters, +} + +/// AzureFailureDomain configures failure domain information for the Azure platform. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoFailureDomainsAzure { + /// subnet is the name of the network subnet in which the VM will be created. When omitted, the subnet value from the machine providerSpec template will be used. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub subnet: Option, + /// Availability Zone for the virtual machine. If nil, the virtual machine should be deployed to no zone. + pub zone: String, +} + +/// GCPFailureDomain configures failure domain information for the GCP platform +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoFailureDomainsGcp { + /// Zone is the zone in which the GCP machine provider will create the VM. + pub zone: String, +} + +/// NutanixFailureDomainReference refers to the failure domain of the Nutanix platform. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoFailureDomainsNutanix { + /// name of the failure domain in which the nutanix machine provider will create the VM. Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. + pub name: String, +} + +/// OpenStackFailureDomain configures failure domain information for the OpenStack platform. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoFailureDomainsOpenstack { + /// availabilityZone is the nova availability zone in which the OpenStack machine provider will create the VM. If not specified, the VM will be created in the default availability zone specified in the nova configuration. Availability zone names must NOT contain : since it is used by admin users to specify hosts where instances are launched in server creation. 
Also, it must not contain spaces otherwise it will lead to node that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "availabilityZone")] + pub availability_zone: Option, + /// rootVolume contains settings that will be used by the OpenStack machine provider to create the root volume attached to the VM. If not specified, no root volume will be created. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "rootVolume")] + pub root_volume: Option, +} + +/// rootVolume contains settings that will be used by the OpenStack machine provider to create the root volume attached to the VM. If not specified, no root volume will be created. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoFailureDomainsOpenstackRootVolume { + /// availabilityZone specifies the Cinder availability zone where the root volume will be created. If not specifified, the root volume will be created in the availability zone specified by the volume type in the cinder configuration. If the volume type (configured in the OpenStack cluster) does not specify an availability zone, the root volume will be created in the default availability zone specified in the cinder configuration. See https://docs.openstack.org/cinder/latest/admin/availability-zone-type.html for more details. If the OpenStack cluster is deployed with the cross_az_attach configuration option set to false, the root volume will have to be in the same availability zone as the VM (defined by OpenStackFailureDomain.AvailabilityZone). 
Availability zone names must NOT contain spaces otherwise it will lead to volume that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "availabilityZone")] + pub availability_zone: Option, + /// volumeType specifies the type of the root volume that will be provisioned. The maximum length of a volume type name is 255 characters, as per the OpenStack limit. + #[serde(rename = "volumeType")] + pub volume_type: String, +} + +/// FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoFailureDomainsPlatform { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "AWS")] + Aws, + Azure, + BareMetal, + #[serde(rename = "GCP")] + Gcp, + Libvirt, + OpenStack, + None, + VSphere, + #[serde(rename = "oVirt")] + OVirt, + #[serde(rename = "IBMCloud")] + IbmCloud, + KubeVirt, + EquinixMetal, + #[serde(rename = "PowerVS")] + PowerVs, + AlibabaCloud, + Nutanix, + External, +} + +/// ObjectMeta is the standard object metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoMetadata { + /// Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. This field must contain both the 'machine.openshift.io/cluster-api-machine-role' and 'machine.openshift.io/cluster-api-machine-type' labels, both with a value of 'master'. It must also contain a label with the key 'machine.openshift.io/cluster-api-cluster'. + pub labels: BTreeMap, +} + +/// Spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoSpec { + /// LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lifecycleHooks")] + pub lifecycle_hooks: Option, + /// ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. 
Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerID")] + pub provider_id: Option, + /// ProviderSpec details Provider-specific configuration to use during node creation. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerSpec")] + pub provider_spec: Option, + /// The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + #[serde(default, skip_serializing_if = "Option::is_none")] + pub taints: Option>, +} + +/// LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoSpecLifecycleHooks { + /// PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "preDrain")] + pub pre_drain: Option>, + /// PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preTerminate")] + pub pre_terminate: Option>, +} + +/// LifecycleHook represents a single instance of a lifecycle hook +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoSpecLifecycleHooksPreDrain { + /// Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + pub name: String, + /// Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + pub owner: String, +} + +/// LifecycleHook represents a single instance of a lifecycle hook +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoSpecLifecycleHooksPreTerminate { + /// Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + pub name: String, + /// Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. 
+ pub owner: String, +} + +/// ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoSpecMetadata { + /// Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. + /// If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). + /// Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "generateName")] + pub generate_name: Option, + /// Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, + /// Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace defines the space within each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. + /// Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ownerReferences")] + pub owner_references: Option>, +} + +/// OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoSpecMetadataOwnerReferences { + /// API version of the referent. 
+ #[serde(rename = "apiVersion")] + pub api_version: String, + /// If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "blockOwnerDeletion")] + pub block_owner_deletion: Option, + /// If true, this reference points to the managing controller. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub controller: Option, + /// Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + pub kind: String, + /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names + pub name: String, + /// UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids + pub uid: String, +} + +/// ProviderSpec details Provider-specific configuration to use during node creation. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoSpecProviderSpec { + /// Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option>, +} + +/// The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetTemplateMachinesV1beta1MachineOpenshiftIoSpecTaints { + /// Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + pub effect: String, + /// Required. The taint key to be applied to a node. + pub key: String, + /// TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeAdded")] + pub time_added: Option, + /// The taint value corresponding to the taint key. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +/// ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetStatus { + /// Conditions represents the observations of the ControlPlaneMachineSet's current state. Known .status.conditions.type are: Available, Degraded and Progressing. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// ObservedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, which is updated on mutation by the API Server. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// ReadyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. Note that this value may be higher than the desired number of replicas while rolling updates are in-progress. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// Replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. 
Note that during update operations this value may differ from the desired replica count. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub replicas: Option, + /// UnavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity. When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unavailableReplicas")] + pub unavailable_replicas: Option, + /// UpdatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "updatedReplicas")] + pub updated_replicas: Option, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ControlPlaneMachineSetStatusConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. + pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + pub reason: String, + /// status of the condition, one of True, False, Unknown. + pub status: ControlPlaneMachineSetStatusConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ControlPlaneMachineSetStatusConditionsStatus { + True, + False, + Unknown, +} + diff --git a/kube-custom-resources-rs/src/machine_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/machine_openshift_io/v1/mod.rs new file mode 100644 index 000000000..774972461 --- /dev/null +++ b/kube-custom-resources-rs/src/machine_openshift_io/v1/mod.rs @@ -0,0 +1 @@ +pub mod controlplanemachinesets; diff --git a/kube-custom-resources-rs/src/machine_openshift_io/v1beta1/machinehealthchecks.rs b/kube-custom-resources-rs/src/machine_openshift_io/v1beta1/machinehealthchecks.rs new file mode 100644 index 000000000..9d4b401a1 --- /dev/null +++ b/kube-custom-resources-rs/src/machine_openshift_io/v1beta1/machinehealthchecks.rs @@ -0,0 +1,136 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/machine.openshift.io/v1beta1/machinehealthchecks.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + 
+use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; +use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; + +/// Specification of machine health check policy +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "machine.openshift.io", version = "v1beta1", kind = "MachineHealthCheck", plural = "machinehealthchecks")] +#[kube(namespaced)] +#[kube(status = "MachineHealthCheckStatus")] +#[kube(schema = "disabled")] +pub struct MachineHealthCheckSpec { + /// Any farther remediation is only allowed if at most "MaxUnhealthy" machines selected by "selector" are not healthy. Expects either a postive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. Both 0 and 0% are valid and will block all remediation. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxUnhealthy")] + pub max_unhealthy: Option, + /// Machines older than this duration without a node will be considered to have failed and will be remediated. To prevent Machines without Nodes from being removed, disable startup checks by setting this value explicitly to "0". Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeStartupTimeout")] + pub node_startup_timeout: Option, + /// RemediationTemplate is a reference to a remediation template provided by an infrastructure provider. + /// This field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "remediationTemplate")] + pub remediation_template: Option, + /// Label selector to match machines whose health will be exercised. Note: An empty selector will match all machines. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub selector: Option, + /// UnhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unhealthyConditions")] + pub unhealthy_conditions: Option>, +} + +/// RemediationTemplate is a reference to a remediation template provided by an infrastructure provider. +/// This field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineHealthCheckRemediationTemplate { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + +/// Label selector to match machines whose health will be exercised. Note: An empty selector will match all machines. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineHealthCheckSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineHealthCheckSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// UnhealthyCondition represents a Node condition type and value with a timeout specified as a duration. When the named condition has been in the given status for at least the timeout value, a node is considered unhealthy. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineHealthCheckUnhealthyConditions { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + /// Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub timeout: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// Most recently observed status of MachineHealthCheck resource +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineHealthCheckStatus { + /// Conditions defines the current state of the MachineHealthCheck + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// total number of machines counted by this machine health check + #[serde(default, skip_serializing_if = "Option::is_none", rename = "currentHealthy")] + pub current_healthy: Option, + /// total number of machines counted by this machine health check + #[serde(default, skip_serializing_if = "Option::is_none", rename = "expectedMachines")] + pub expected_machines: Option, + /// RemediationsAllowed is the number of further remediations allowed by this machine health check before maxUnhealthy short circuiting will be applied + #[serde(default, skip_serializing_if = "Option::is_none", rename = "remediationsAllowed")] + pub remediations_allowed: Option, +} + +/// Condition defines an observation of a Machine API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineHealthCheckStatusConditions { + /// Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. 
The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + diff --git a/kube-custom-resources-rs/src/machine_openshift_io/v1beta1/machines.rs b/kube-custom-resources-rs/src/machine_openshift_io/v1beta1/machines.rs new file mode 100644 index 000000000..473ebf1aa --- /dev/null +++ b/kube-custom-resources-rs/src/machine_openshift_io/v1beta1/machines.rs @@ -0,0 +1,242 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/machine.openshift.io/v1beta1/machines.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// MachineSpec defines the desired state of Machine +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "machine.openshift.io", version = "v1beta1", kind = "Machine", plural = "machines")] +#[kube(namespaced)] +#[kube(status = "MachineStatus")] +#[kube(schema = "disabled")] 
+pub struct MachineSpec { + /// LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lifecycleHooks")] + pub lifecycle_hooks: Option, + /// ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerID")] + pub provider_id: Option, + /// ProviderSpec details Provider-specific configuration to use during node creation. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerSpec")] + pub provider_spec: Option, + /// The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. 
if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + #[serde(default, skip_serializing_if = "Option::is_none")] + pub taints: Option>, +} + +/// LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineLifecycleHooks { + /// PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preDrain")] + pub pre_drain: Option>, + /// PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preTerminate")] + pub pre_terminate: Option>, +} + +/// LifecycleHook represents a single instance of a lifecycle hook +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineLifecycleHooksPreDrain { + /// Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + pub name: String, + /// Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + pub owner: String, +} + +/// LifecycleHook represents a single instance of a lifecycle hook +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineLifecycleHooksPreTerminate { + /// Name defines a unique name for the lifcycle hook. 
The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + pub name: String, + /// Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + pub owner: String, +} + +/// ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineMetadata { + /// Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. + /// If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). + /// Applied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "generateName")] + pub generate_name: Option, + /// Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, + /// Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace defines the space within each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. + /// Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ownerReferences")] + pub owner_references: Option>, +} + +/// OwnerReference contains enough information to let you identify an owning object. 
An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineMetadataOwnerReferences { + /// API version of the referent. + #[serde(rename = "apiVersion")] + pub api_version: String, + /// If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "blockOwnerDeletion")] + pub block_owner_deletion: Option, + /// If true, this reference points to the managing controller. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub controller: Option, + /// Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + pub kind: String, + /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names + pub name: String, + /// UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids + pub uid: String, +} + +/// ProviderSpec details Provider-specific configuration to use during node creation. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineProviderSpec { + /// Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option>, +} + +/// The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineTaints { + /// Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + pub effect: String, + /// Required. The taint key to be applied to a node. + pub key: String, + /// TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeAdded")] + pub time_added: Option, + /// The taint value corresponding to the taint key. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +/// MachineStatus defines the observed state of Machine +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineStatus { + /// Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub addresses: Option>, + /// Conditions defines the current state of the Machine + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// ErrorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption. + /// This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. 
Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. + /// Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "errorMessage")] + pub error_message: Option, + /// ErrorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation. + /// This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. + /// Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "errorReason")] + pub error_reason: Option, + /// LastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastOperation")] + pub last_operation: Option, + /// LastUpdated identifies when this status was last observed. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastUpdated")] + pub last_updated: Option, + /// NodeRef will point to the corresponding Node if it exists. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeRef")] + pub node_ref: Option, + /// Phase represents the current phase of machine actuation. One of: Failed, Provisioning, Provisioned, Running, Deleting + #[serde(default, skip_serializing_if = "Option::is_none")] + pub phase: Option, + /// ProviderStatus details a Provider-specific status. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerStatus")] + pub provider_status: Option>, +} + +/// NodeAddress contains information for the node's address. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineStatusAddresses { + /// The node address. + pub address: String, + /// Node address type, one of Hostname, ExternalIP or InternalIP. + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition defines an observation of a Machine API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineStatusConditions { + /// Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. 
This field may not be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// LastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineStatusLastOperation { + /// Description is the human-readable description of the last operation. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub description: Option, + /// LastUpdated is the timestamp at which LastOperation API was last-updated. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastUpdated")] + pub last_updated: Option, + /// State is the current status of the last performed operation. E.g. Processing, Failed, Successful etc + #[serde(default, skip_serializing_if = "Option::is_none")] + pub state: Option, + /// Type is the type of operation which was last performed. E.g. 
Create, Delete, Update etc + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// NodeRef will point to the corresponding Node if it exists. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineStatusNodeRef { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + diff --git a/kube-custom-resources-rs/src/machine_openshift_io/v1beta1/machinesets.rs b/kube-custom-resources-rs/src/machine_openshift_io/v1beta1/machinesets.rs new file mode 100644 index 000000000..5a5743b0a --- /dev/null +++ b/kube-custom-resources-rs/src/machine_openshift_io/v1beta1/machinesets.rs @@ -0,0 +1,265 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/machine.openshift.io/v1beta1/machinesets.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// MachineSetSpec defines the desired state of MachineSet +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "machine.openshift.io", version = "v1beta1", kind = "MachineSet", plural = "machinesets")] +#[kube(namespaced)] +#[kube(status = "MachineSetStatus")] +#[kube(schema = "disabled")] +pub struct MachineSetSpec { + /// DeletePolicy defines the policy used to identify nodes to delete when downscaling. Defaults to "Random". Valid values are "Random, "Newest", "Oldest" + #[serde(default, skip_serializing_if = "Option::is_none", rename = "deletePolicy")] + pub delete_policy: Option, + /// MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. 
Defaults to 0 (machine will be considered available as soon as it is ready) + #[serde(default, skip_serializing_if = "Option::is_none", rename = "minReadySeconds")] + pub min_ready_seconds: Option, + /// Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub replicas: Option, + /// Selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + #[serde(default, skip_serializing_if = "Option::is_none")] + pub selector: Option, + /// Template is the object that describes the machine that will be created if insufficient replicas are detected. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub template: Option, +} + +/// MachineSetSpec defines the desired state of MachineSet +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum MachineSetDeletePolicy { + Random, + Newest, + Oldest, +} + +/// Selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// Template is the object that describes the machine that will be created if insufficient replicas are detected. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplate { + /// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// Specification of the desired behavior of the machine. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub spec: Option, +} + +/// Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateMetadata { + /// Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. + /// If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). + /// Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "generateName")] + pub generate_name: Option, + /// Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, + /// Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace defines the space within each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. + /// Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ownerReferences")] + pub owner_references: Option>, +} + +/// OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateMetadataOwnerReferences { + /// API version of the referent. + #[serde(rename = "apiVersion")] + pub api_version: String, + /// If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. 
See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "blockOwnerDeletion")] + pub block_owner_deletion: Option, + /// If true, this reference points to the managing controller. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub controller: Option, + /// Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + pub kind: String, + /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names + pub name: String, + /// UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids + pub uid: String, +} + +/// Specification of the desired behavior of the machine. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateSpec { + /// LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lifecycleHooks")] + pub lifecycle_hooks: Option, + /// ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. 
This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerID")] + pub provider_id: Option, + /// ProviderSpec details Provider-specific configuration to use during node creation. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerSpec")] + pub provider_spec: Option, + /// The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + #[serde(default, skip_serializing_if = "Option::is_none")] + pub taints: Option>, +} + +/// LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateSpecLifecycleHooks { + /// PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "preDrain")] + pub pre_drain: Option>, + /// PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preTerminate")] + pub pre_terminate: Option>, +} + +/// LifecycleHook represents a single instance of a lifecycle hook +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateSpecLifecycleHooksPreDrain { + /// Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + pub name: String, + /// Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + pub owner: String, +} + +/// LifecycleHook represents a single instance of a lifecycle hook +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateSpecLifecycleHooksPreTerminate { + /// Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + pub name: String, + /// Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + pub owner: String, +} + +/// ObjectMeta will autopopulate the Node created. 
Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateSpecMetadata { + /// Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. + /// If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). + /// Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "generateName")] + pub generate_name: Option, + /// Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, + /// Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace defines the space within each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. + /// Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ownerReferences")] + pub owner_references: Option>, +} + +/// OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateSpecMetadataOwnerReferences { + /// API version of the referent. + #[serde(rename = "apiVersion")] + pub api_version: String, + /// If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. 
See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "blockOwnerDeletion")] + pub block_owner_deletion: Option, + /// If true, this reference points to the managing controller. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub controller: Option, + /// Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + pub kind: String, + /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names + pub name: String, + /// UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids + pub uid: String, +} + +/// ProviderSpec details Provider-specific configuration to use during node creation. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateSpecProviderSpec { + /// Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option>, +} + +/// The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateSpecTaints { + /// Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + pub effect: String, + /// Required. 
The taint key to be applied to a node. + pub key: String, + /// TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeAdded")] + pub time_added: Option, + /// The taint value corresponding to the taint key. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +/// MachineSetStatus defines the observed state of MachineSet +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetStatus { + /// The number of available replicas (ready for at least minReadySeconds) for this MachineSet. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "availableReplicas")] + pub available_replicas: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "errorMessage")] + pub error_message: Option, + /// In the event that there is a terminal problem reconciling the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason will be populated with a succinct value suitable for machine interpretation, while ErrorMessage will contain a more verbose string suitable for logging and human consumption. + /// These fields should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachineTemplate's spec or the configuration of the machine controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the machine controller, or the responsible machine controller itself being critically misconfigured. + /// Any transient errors that occur during the reconciliation of Machines can be added as events to the MachineSet object and/or logged in the controller's output. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "errorReason")] + pub error_reason: Option, + /// The number of replicas that have labels matching the labels of the machine template of the MachineSet. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fullyLabeledReplicas")] + pub fully_labeled_replicas: Option, + /// ObservedGeneration reflects the generation of the most recently observed MachineSet. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is "Ready". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// Replicas is the most recently observed number of replicas. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub replicas: Option, +} + diff --git a/kube-custom-resources-rs/src/machine_openshift_io/v1beta1/mod.rs b/kube-custom-resources-rs/src/machine_openshift_io/v1beta1/mod.rs new file mode 100644 index 000000000..feaa97d81 --- /dev/null +++ b/kube-custom-resources-rs/src/machine_openshift_io/v1beta1/mod.rs @@ -0,0 +1,3 @@ +pub mod machinehealthchecks; +pub mod machines; +pub mod machinesets; diff --git a/kube-custom-resources-rs/src/monitoring_openshift_io/mod.rs b/kube-custom-resources-rs/src/monitoring_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/monitoring_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/monitoring_openshift_io/v1/alertingrules.rs b/kube-custom-resources-rs/src/monitoring_openshift_io/v1/alertingrules.rs new file mode 100644 index 000000000..7f0f1d010 --- /dev/null +++ b/kube-custom-resources-rs/src/monitoring_openshift_io/v1/alertingrules.rs @@ -0,0 +1,69 @@ +// WARNING: generated by kopium - manual 
changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/monitoring.openshift.io/v1/alertingrules.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; +use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; + +/// spec describes the desired state of this AlertingRule object. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "monitoring.openshift.io", version = "v1", kind = "AlertingRule", plural = "alertingrules")] +#[kube(namespaced)] +#[kube(status = "AlertingRuleStatus")] +#[kube(schema = "disabled")] +pub struct AlertingRuleSpec { + /// groups is a list of grouped alerting rules. Rule groups are the unit at which Prometheus parallelizes rule processing. All rules in a single group share a configured evaluation interval. All rules in the group will be processed together on this interval, sequentially, and all rules will be processed. + /// It's common to group related alerting rules into a single AlertingRule resources, and within that resource, closely related alerts, or simply alerts with the same interval, into individual groups. You are also free to create AlertingRule resources with only a single rule group, but be aware that this can have a performance impact on Prometheus if the group is extremely large or has very complex query expressions to evaluate. Spreading very complex rules across multiple groups to allow them to be processed in parallel is also a common use-case. + pub groups: Vec, +} + +/// RuleGroup is a list of sequentially evaluated alerting rules. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AlertingRuleGroups { + /// interval is how often rules in the group are evaluated. 
If not specified, it defaults to the global.evaluation_interval configured in Prometheus, which itself defaults to 30 seconds. You can check if this value has been modified from the default on your cluster by inspecting the platform Prometheus configuration: The relevant field in that resource is: spec.evaluationInterval + #[serde(default, skip_serializing_if = "Option::is_none")] + pub interval: Option, + /// name is the name of the group. + pub name: String, + /// rules is a list of sequentially evaluated alerting rules. Prometheus may process rule groups in parallel, but rules within a single group are always processed sequentially, and all rules are processed. + pub rules: Vec, +} + +/// Rule describes an alerting rule. See Prometheus documentation: - https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AlertingRuleGroupsRules { + /// alert is the name of the alert. Must be a valid label value, i.e. may contain any Unicode character. + pub alert: String, + /// annotations to add to each alert. These are values that can be used to store longer additional information that you won't query on, such as alert descriptions or runbook links. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// expr is the PromQL expression to evaluate. Every evaluation cycle this is evaluated at the current time, and all resultant time series become pending or firing alerts. This is most often a string representing a PromQL expression, e.g.: mapi_current_pending_csr > mapi_max_pending_csr In rare cases this could be a simple integer, e.g. a simple "1" if the intent is to create an alert that is always firing. This is sometimes used to create an always-firing "Watchdog" alert in order to ensure the alerting pipeline is functional. 
+ pub expr: IntOrString, + /// for is the time period after which alerts are considered firing after first returning results. Alerts which have not yet fired for long enough are considered pending. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "for")] + pub r#for: Option, + /// labels to add or overwrite for each alert. The results of the PromQL expression for the alert will result in an existing set of labels for the alert, after evaluating the expression, for any label specified here with the same name as a label in that set, the label here wins and overwrites the previous value. These should typically be short identifying values that may be useful to query against. A common example is the alert severity, where one sets `severity: warning` under the `labels` key: + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, +} + +/// status describes the current state of this AlertOverrides object. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AlertingRuleStatus { + /// observedGeneration is the last generation change you've dealt with. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// prometheusRule is the generated PrometheusRule for this AlertingRule. Each AlertingRule instance results in a generated PrometheusRule object in the same namespace, which is always the openshift-monitoring namespace. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "prometheusRule")] + pub prometheus_rule: Option, +} + +/// prometheusRule is the generated PrometheusRule for this AlertingRule. Each AlertingRule instance results in a generated PrometheusRule object in the same namespace, which is always the openshift-monitoring namespace. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AlertingRuleStatusPrometheusRule { + /// name of the referenced PrometheusRule. 
+ pub name: String, +} + diff --git a/kube-custom-resources-rs/src/monitoring_openshift_io/v1/alertrelabelconfigs.rs b/kube-custom-resources-rs/src/monitoring_openshift_io/v1/alertrelabelconfigs.rs new file mode 100644 index 000000000..c25ae851c --- /dev/null +++ b/kube-custom-resources-rs/src/monitoring_openshift_io/v1/alertrelabelconfigs.rs @@ -0,0 +1,96 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/monitoring.openshift.io/v1/alertrelabelconfigs.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec describes the desired state of this AlertRelabelConfig object. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "monitoring.openshift.io", version = "v1", kind = "AlertRelabelConfig", plural = "alertrelabelconfigs")] +#[kube(namespaced)] +#[kube(status = "AlertRelabelConfigStatus")] +#[kube(schema = "disabled")] +pub struct AlertRelabelConfigSpec { + /// configs is a list of sequentially evaluated alert relabel configs. + pub configs: Vec, +} + +/// RelabelConfig allows dynamic rewriting of label sets for alerts. See Prometheus documentation: - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct AlertRelabelConfigConfigs { + /// action to perform based on regex matching. Must be one of: 'Replace', 'Keep', 'Drop', 'HashMod', 'LabelMap', 'LabelDrop', or 'LabelKeep'. Default is: 'Replace' + #[serde(default, skip_serializing_if = "Option::is_none")] + pub action: Option, + /// modulus to take of the hash of the source label values. 
This can be combined with the 'HashMod' action to set 'target_label' to the 'modulus' of a hash of the concatenated 'source_labels'. This is only valid if sourceLabels is not empty and action is not 'LabelKeep' or 'LabelDrop'. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub modulus: Option, + /// regex against which the extracted value is matched. Default is: '(.*)' regex is required for all actions except 'HashMod' + #[serde(default, skip_serializing_if = "Option::is_none")] + pub regex: Option, + /// replacement value against which a regex replace is performed if the regular expression matches. This is required if the action is 'Replace' or 'LabelMap' and forbidden for actions 'LabelKeep' and 'LabelDrop'. Regex capture groups are available. Default is: '$1' + #[serde(default, skip_serializing_if = "Option::is_none")] + pub replacement: Option, + /// separator placed between concatenated source label values. When omitted, Prometheus will use its default value of ';'. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub separator: Option, + /// sourceLabels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the 'Replace', 'Keep', and 'Drop' actions. Not allowed for actions 'LabelKeep' and 'LabelDrop'. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "sourceLabels")] + pub source_labels: Option>, + /// targetLabel to which the resulting value is written in a 'Replace' action. It is required for 'Replace' and 'HashMod' actions and forbidden for actions 'LabelKeep' and 'LabelDrop'. Regex capture groups are available. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetLabel")] + pub target_label: Option, +} + +/// RelabelConfig allows dynamic rewriting of label sets for alerts. 
See Prometheus documentation: - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum AlertRelabelConfigConfigsAction { + Replace, + Keep, + Drop, + HashMod, + LabelMap, + LabelDrop, + LabelKeep, +} + +/// status describes the current state of this AlertRelabelConfig object. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct AlertRelabelConfigStatus { + /// conditions contains details on the state of the AlertRelabelConfig, may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct AlertRelabelConfigStatusConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. 
+ pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + pub reason: String, + /// status of the condition, one of True, False, Unknown. + pub status: AlertRelabelConfigStatusConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum AlertRelabelConfigStatusConditionsStatus { + True, + False, + Unknown, +} + diff --git a/kube-custom-resources-rs/src/monitoring_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/monitoring_openshift_io/v1/mod.rs new file mode 100644 index 000000000..4f1c7890b --- /dev/null +++ b/kube-custom-resources-rs/src/monitoring_openshift_io/v1/mod.rs @@ -0,0 +1,2 @@ +pub mod alertingrules; +pub mod alertrelabelconfigs; diff --git a/kube-custom-resources-rs/src/network_openshift_io/mod.rs b/kube-custom-resources-rs/src/network_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/network_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/network_openshift_io/v1/clusternetworks.rs b/kube-custom-resources-rs/src/network_openshift_io/v1/clusternetworks.rs new file mode 100644 index 000000000..a28f435aa --- /dev/null +++ b/kube-custom-resources-rs/src/network_openshift_io/v1/clusternetworks.rs @@ -0,0 +1,18 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/network.openshift.io/v1/clusternetworks.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + + +use serde::{Serialize, Deserialize}; + +/// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterNetworkClusterNetworks { + /// CIDR defines the total range of a cluster networks address space. + #[serde(rename = "CIDR")] + pub cidr: String, + /// HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. + #[serde(rename = "hostSubnetLength")] + pub host_subnet_length: i32, +} + diff --git a/kube-custom-resources-rs/src/network_openshift_io/v1/egressnetworkpolicies.rs b/kube-custom-resources-rs/src/network_openshift_io/v1/egressnetworkpolicies.rs new file mode 100644 index 000000000..83ccddd89 --- /dev/null +++ b/kube-custom-resources-rs/src/network_openshift_io/v1/egressnetworkpolicies.rs @@ -0,0 +1,38 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/network.openshift.io/v1/egressnetworkpolicies.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec is the specification of the current egress network policy +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "network.openshift.io", version = "v1", kind = "EgressNetworkPolicy", plural = "egressnetworkpolicies")] +#[kube(namespaced)] +#[kube(schema = "disabled")] +pub struct EgressNetworkPolicySpec { + /// egress contains the list of egress policy rules + pub egress: Vec, +} + +/// EgressNetworkPolicyRule contains a single egress network policy rule +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct EgressNetworkPolicyEgress { + /// to is the target that traffic is allowed/denied to + pub to: EgressNetworkPolicyEgressTo, + /// type marks this as an "Allow" or "Deny" rule + #[serde(rename = "type")] + pub r#type: String, +} + +/// to 
is the target that traffic is allowed/denied to +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct EgressNetworkPolicyEgressTo { + /// CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset Ideally we would have liked to use the cidr openapi format for this property. But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs We are therefore using a regex pattern to validate instead. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cidrSelector")] + pub cidr_selector: Option, + /// DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset + #[serde(default, skip_serializing_if = "Option::is_none", rename = "dnsName")] + pub dns_name: Option, +} + diff --git a/kube-custom-resources-rs/src/network_openshift_io/v1/hostsubnets.rs b/kube-custom-resources-rs/src/network_openshift_io/v1/hostsubnets.rs new file mode 100644 index 000000000..7cb14d256 --- /dev/null +++ b/kube-custom-resources-rs/src/network_openshift_io/v1/hostsubnets.rs @@ -0,0 +1,7 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/network.openshift.io/v1/hostsubnets.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + + + + diff --git a/kube-custom-resources-rs/src/network_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/network_openshift_io/v1/mod.rs new file mode 100644 index 000000000..741ba91ad --- /dev/null +++ b/kube-custom-resources-rs/src/network_openshift_io/v1/mod.rs @@ -0,0 +1,4 @@ +pub mod clusternetworks; +pub mod egressnetworkpolicies; +pub mod hostsubnets; +pub mod netnamespaces; diff --git a/kube-custom-resources-rs/src/network_openshift_io/v1/netnamespaces.rs b/kube-custom-resources-rs/src/network_openshift_io/v1/netnamespaces.rs new file mode 100644 index 000000000..c50e2fabe --- /dev/null +++ 
b/kube-custom-resources-rs/src/network_openshift_io/v1/netnamespaces.rs @@ -0,0 +1,7 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/network.openshift.io/v1/netnamespaces.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + + + + diff --git a/kube-custom-resources-rs/src/network_operator_openshift_io/mod.rs b/kube-custom-resources-rs/src/network_operator_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/network_operator_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/network_operator_openshift_io/v1/egressrouters.rs b/kube-custom-resources-rs/src/network_operator_openshift_io/v1/egressrouters.rs new file mode 100644 index 000000000..17544de0c --- /dev/null +++ b/kube-custom-resources-rs/src/network_operator_openshift_io/v1/egressrouters.rs @@ -0,0 +1,149 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/network.operator.openshift.io/v1/egressrouters.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// Specification of the desired egress router. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "network.operator.openshift.io", version = "v1", kind = "EgressRouter", plural = "egressrouters")] +#[kube(namespaced)] +#[kube(status = "EgressRouterStatus")] +#[kube(schema = "disabled")] +pub struct EgressRouterSpec { + /// List of IP addresses to configure on the pod's secondary interface. + pub addresses: Vec, + /// Mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently. + pub mode: EgressRouterMode, + /// Specification of interface to create/use. The default is macvlan. 
Currently only macvlan is supported. + #[serde(rename = "networkInterface")] + pub network_interface: EgressRouterNetworkInterface, + /// Redirect represents the configuration parameters specific to redirect mode. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub redirect: Option, +} + +/// EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct EgressRouterAddresses { + /// IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway: Option, + /// IP is the address to configure on the router's interface. Can be IPv4 or IPv6. + pub ip: String, +} + +/// Specification of the desired egress router. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum EgressRouterMode { + Redirect, +} + +/// Specification of interface to create/use. The default is macvlan. Currently only macvlan is supported. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct EgressRouterNetworkInterface { + /// Arguments specific to the interfaceType macvlan + #[serde(default, skip_serializing_if = "Option::is_none")] + pub macvlan: Option, +} + +/// Arguments specific to the interfaceType macvlan +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct EgressRouterNetworkInterfaceMacvlan { + /// Name of the master interface. Need not be specified if it can be inferred from the IP address. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub master: Option, + /// Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge". 
+ pub mode: EgressRouterNetworkInterfaceMacvlanMode, +} + +/// Arguments specific to the interfaceType macvlan +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum EgressRouterNetworkInterfaceMacvlanMode { + Bridge, + Private, + #[serde(rename = "VEPA")] + Vepa, + Passthru, +} + +/// Redirect represents the configuration parameters specific to redirect mode. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct EgressRouterRedirect { + /// FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. If no redirect rules are specified, all traffic from the router are redirected to this IP. If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP. If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fallbackIP")] + pub fallback_ip: Option, + /// List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "redirectRules")] + pub redirect_rules: Option>, +} + +/// L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct EgressRouterRedirectRedirectRules { + /// IP specifies the remote destination's IP address. Can be IPv4 or IPv6. + #[serde(rename = "destinationIP")] + pub destination_ip: String, + /// Port is the port number to which clients should send traffic to be redirected. + pub port: i32, + /// Protocol can be TCP, SCTP or UDP. + pub protocol: EgressRouterRedirectRedirectRulesProtocol, + /// TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. If unspecified, the value from "Port" is used. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetPort")] + pub target_port: Option, +} + +/// L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum EgressRouterRedirectRedirectRulesProtocol { + #[serde(rename = "TCP")] + Tcp, + #[serde(rename = "UDP")] + Udp, + #[serde(rename = "SCTP")] + Sctp, +} + +/// Observed status of EgressRouter. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct EgressRouterStatus { + /// Observed status of the egress router + pub conditions: Vec, +} + +/// EgressRouterStatusCondition represents the state of the egress router's managed and monitored components. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct EgressRouterStatusConditions { + /// LastTransitionTime is the time of the last update to the current status property. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// Message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// Reason is the CamelCase reason for the condition's current status. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Status of the condition, one of True, False, Unknown. + pub status: EgressRouterStatusConditionsStatus, + /// Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded + #[serde(rename = "type")] + pub r#type: EgressRouterStatusConditionsType, +} + +/// EgressRouterStatusCondition represents the state of the egress router's managed and monitored components. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum EgressRouterStatusConditionsStatus { + True, + False, + Unknown, +} + +/// EgressRouterStatusCondition represents the state of the egress router's managed and monitored components. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum EgressRouterStatusConditionsType { + Available, + Progressing, + Degraded, +} + diff --git a/kube-custom-resources-rs/src/network_operator_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/network_operator_openshift_io/v1/mod.rs new file mode 100644 index 000000000..3695261c3 --- /dev/null +++ b/kube-custom-resources-rs/src/network_operator_openshift_io/v1/mod.rs @@ -0,0 +1 @@ +pub mod egressrouters; diff --git a/kube-custom-resources-rs/src/operator_openshift_io/mod.rs b/kube-custom-resources-rs/src/operator_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/authentications.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/authentications.rs new file mode 100644 index 000000000..dfb78081b --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/authentications.rs @@ -0,0 +1,120 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/authentications.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "Authentication", plural = "authentications")] +#[kube(status = "AuthenticationStatus")] +#[kube(schema = "disabled")] +pub struct AuthenticationSpec { + /// 
logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum AuthenticationLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum AuthenticationOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AuthenticationStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// OAuthAPIServer holds status specific only to oauth-apiserver + #[serde(default, skip_serializing_if = "Option::is_none", rename = "oauthAPIServer")] + pub oauth_api_server: Option, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AuthenticationStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AuthenticationStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + +/// OAuthAPIServer holds status specific only to oauth-apiserver +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AuthenticationStatusOauthApiServer { + /// 
LatestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestAvailableRevision")] + pub latest_available_revision: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/cloudcredentials.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/cloudcredentials.rs new file mode 100644 index 000000000..4fd3994d7 --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/cloudcredentials.rs @@ -0,0 +1,126 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/cloudcredentials.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "CloudCredential", plural = "cloudcredentials")] +#[kube(status = "CloudCredentialStatus")] +#[kube(schema = "disabled")] +pub struct CloudCredentialSpec { + /// CredentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into "manual" mode if desired. Leaving the field in default mode runs CCO so that the cluster's cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). 
Supported modes: AWS/Azure/GCP: "" (Default), "Mint", "Passthrough", "Manual" Others: Do not set value as other platforms only support running in "Passthrough" + #[serde(default, skip_serializing_if = "Option::is_none", rename = "credentialsMode")] + pub credentials_mode: Option, + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. 
Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +/// CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum CloudCredentialCredentialsMode { + #[serde(rename = "")] + KopiumEmpty, + Manual, + Mint, + Passthrough, +} + +/// CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum CloudCredentialLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum CloudCredentialOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// CloudCredentialStatus defines the observed status of the cloud-credential-operator. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct CloudCredentialStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct CloudCredentialStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct CloudCredentialStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/clustercsidrivers.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/clustercsidrivers.rs new file mode 100644 index 000000000..5e56d111e --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/clustercsidrivers.rs @@ -0,0 +1,237 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/clustercsidrivers.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "ClusterCSIDriver", plural = "clustercsidrivers")] 
+#[kube(status = "ClusterCSIDriverStatus")] +#[kube(schema = "disabled")] +pub struct ClusterCSIDriverSpec { + /// driverConfig can be used to specify platform specific driver configuration. When omitted, this means no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "driverConfig")] + pub driver_config: Option, + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// StorageClassState determines if CSI operator should create and manage storage classes. 
If this field value is empty or Managed - CSI operator will continuously reconcile storage class and create if necessary. If this field value is Unmanaged - CSI operator will not reconcile any previously created storage class. If this field value is Removed - CSI operator will delete the storage class it created previously. When omitted, this means the user has no opinion and the platform chooses a reasonable default, which is subject to change over time. The current default behaviour is Managed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "storageClassState")] + pub storage_class_state: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +/// driverConfig can be used to specify platform specific driver configuration. When omitted, this means no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterCSIDriverDriverConfig { + /// aws is used to configure the AWS CSI driver. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub aws: Option, + /// azure is used to configure the Azure CSI driver. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub azure: Option, + /// driverType indicates type of CSI driver for which the driverConfig is being applied to. Valid values are: AWS, Azure, GCP, IBMCloud, vSphere and omitted. Consumers should treat unknown values as a NO-OP. 
+ #[serde(rename = "driverType")] + pub driver_type: ClusterCSIDriverDriverConfigDriverType, + /// gcp is used to configure the GCP CSI driver. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gcp: Option, + /// ibmcloud is used to configure the IBM Cloud CSI driver. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ibmcloud: Option, + /// vsphere is used to configure the vsphere CSI driver. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "vSphere")] + pub v_sphere: Option, +} + +/// aws is used to configure the AWS CSI driver. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterCSIDriverDriverConfigAws { + /// kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, rather than the default KMS key used by AWS. The value may be either the ARN or Alias ARN of a KMS key. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKeyARN")] + pub kms_key_arn: Option, +} + +/// azure is used to configure the Azure CSI driver. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterCSIDriverDriverConfigAzure { + /// diskEncryptionSet sets the cluster default storage class to encrypt volumes with a customer-managed encryption set, rather than the default platform-managed keys. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "diskEncryptionSet")] + pub disk_encryption_set: Option, +} + +/// diskEncryptionSet sets the cluster default storage class to encrypt volumes with a customer-managed encryption set, rather than the default platform-managed keys. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterCSIDriverDriverConfigAzureDiskEncryptionSet { + /// name is the name of the disk encryption set that will be set on the default storage class. The value should consist of only alphanumberic characters, underscores (_), hyphens, and be at most 80 characters in length. 
+ pub name: String, + /// resourceGroup defines the Azure resource group that contains the disk encryption set. The value should consist of only alphanumberic characters, underscores (_), parentheses, hyphens and periods. The value should not end in a period and be at most 90 characters in length. + #[serde(rename = "resourceGroup")] + pub resource_group: String, + /// subscriptionID defines the Azure subscription that contains the disk encryption set. The value should meet the following conditions: 1. It should be a 128-bit number. 2. It should be 36 characters (32 hexadecimal characters and 4 hyphens) long. 3. It should be displayed in five groups separated by hyphens (-). 4. The first group should be 8 characters long. 5. The second, third, and fourth groups should be 4 characters long. 6. The fifth group should be 12 characters long. An Example SubscrionID: f2007bbf-f802-4a47-9336-cf7c6b89b378 + #[serde(rename = "subscriptionID")] + pub subscription_id: String, +} + +/// driverConfig can be used to specify platform specific driver configuration. When omitted, this means no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ClusterCSIDriverDriverConfigDriverType { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "AWS")] + Aws, + Azure, + #[serde(rename = "GCP")] + Gcp, + #[serde(rename = "IBMCloud")] + IbmCloud, + #[serde(rename = "vSphere")] + VSphere, +} + +/// gcp is used to configure the GCP CSI driver. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterCSIDriverDriverConfigGcp { + /// kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied encryption keys, rather than the default keys managed by GCP. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKey")] + pub kms_key: Option, +} + +/// kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied encryption keys, rather than the default keys managed by GCP. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterCSIDriverDriverConfigGcpKmsKey { + /// keyRing is the name of the KMS Key Ring which the KMS Key belongs to. The value should correspond to an existing KMS key ring and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length. + #[serde(rename = "keyRing")] + pub key_ring: String, + /// location is the GCP location in which the Key Ring exists. The value must match an existing GCP location, or "global". Defaults to global, if not set. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub location: Option, + /// name is the name of the customer-managed encryption key to be used for disk encryption. The value should correspond to an existing KMS key and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length. + pub name: String, + /// projectID is the ID of the Project in which the KMS Key Ring exists. It must be 6 to 30 lowercase letters, digits, or hyphens. It must start with a letter. Trailing hyphens are prohibited. + #[serde(rename = "projectID")] + pub project_id: String, +} + +/// ibmcloud is used to configure the IBM Cloud CSI driver. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterCSIDriverDriverConfigIbmcloud { + /// encryptionKeyCRN is the IBM Cloud CRN of the customer-managed root key to use for disk encryption of volumes for the default storage classes. + #[serde(rename = "encryptionKeyCRN")] + pub encryption_key_crn: String, +} + +/// vsphere is used to configure the vsphere CSI driver. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterCSIDriverDriverConfigVSphere { + /// topologyCategories indicates tag categories with which vcenter resources such as hostcluster or datacenter were tagged with. If cluster Infrastructure object has a topology, values specified in Infrastructure object will be used and modifications to topologyCategories will be rejected. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "topologyCategories")] + pub topology_categories: Option>, +} + +/// spec holds user settable values for configuration +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ClusterCSIDriverLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// spec holds user settable values for configuration +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ClusterCSIDriverOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// spec holds user settable values for configuration +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ClusterCSIDriverStorageClassState { + #[serde(rename = "")] + KopiumEmpty, + Managed, + Unmanaged, + Removed, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterCSIDriverStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterCSIDriverStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterCSIDriverStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/configs.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/configs.rs new file mode 100644 index 000000000..a51b8581a --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/configs.rs @@ -0,0 +1,113 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/configs.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec is the specification of the desired behavior of the Config Operator. 
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "Config", plural = "configs")] +#[kube(status = "ConfigStatus")] +#[kube(schema = "disabled")] +pub struct ConfigSpec { + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. 
Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +/// spec is the specification of the desired behavior of the Config Operator. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConfigLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// spec is the specification of the desired behavior of the Config Operator. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConfigOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// status defines the observed status of the Config Operator. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/consoles.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/consoles.rs new file mode 100644 index 000000000..7d1addd1d --- /dev/null 
+++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/consoles.rs @@ -0,0 +1,404 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/consoles.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// ConsoleSpec is the specification of the desired behavior of the Console. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "Console", plural = "consoles")] +#[kube(status = "ConsoleStatus")] +#[kube(schema = "disabled")] +pub struct ConsoleSpec { + /// customization is used to optionally provide a small set of customization options to the web console. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub customization: Option, + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// plugins defines a list of enabled console plugin names. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub plugins: Option>, + /// providers contains configuration for using specific service providers. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub providers: Option, + /// route contains hostname and secret reference that contains the serving certificate. If a custom route is specified, a new route will be created with the provided hostname, under which console will be available. In case of custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. In case of custom hostname points to an arbitrary domain, manual DNS configurations steps are necessary. The default console route will be maintained to reserve the default hostname for console if the custom route is removed. If not specified, default route will be used. DEPRECATED + #[serde(default, skip_serializing_if = "Option::is_none")] + pub route: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +/// customization is used to optionally provide a small set of customization options to the web console. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomization { + /// addPage allows customizing actions on the Add page in developer perspective. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "addPage")] + pub add_page: Option, + /// brand is the default branding of the web console which can be overridden by providing the brand field. There is a limited set of specific brand options. This field controls elements of the console such as the logo. Invalid value will prevent a console rollout. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub brand: Option, + /// customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a ConfigMap in the openshift-config namespace. This can be created with a command like 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. Recommended logo specifications: Dimensions: Max height of 68px and max width of 200px SVG format preferred + #[serde(default, skip_serializing_if = "Option::is_none", rename = "customLogoFile")] + pub custom_logo_file: Option, + /// customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog instead of the normal OpenShift product name. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "customProductName")] + pub custom_product_name: Option, + /// developerCatalog allows to configure the shown developer catalog categories (filters) and types (sub-catalogs). 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "developerCatalog")] + pub developer_catalog: Option, + /// documentationBaseURL links to external documentation are shown in various sections of the web console. Providing documentationBaseURL will override the default documentation URL. Invalid value will prevent a console rollout. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "documentationBaseURL")] + pub documentation_base_url: Option, + /// perspectives allows enabling/disabling of perspective(s) that user can see in the Perspective switcher dropdown. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub perspectives: Option>, + /// projectAccess allows customizing the available list of ClusterRoles in the Developer perspective Project access page which can be used by a project admin to specify roles to other users and restrict access within the project. If set, the list will replace the default ClusterRole options. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "projectAccess")] + pub project_access: Option, + /// quickStarts allows customization of available ConsoleQuickStart resources in console. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "quickStarts")] + pub quick_starts: Option, +} + +/// addPage allows customizing actions on the Add page in developer perspective. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomizationAddPage { + /// disabledActions is a list of actions that are not shown to users. Each action in the list is represented by its ID. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "disabledActions")] + pub disabled_actions: Option>, +} + +/// customization is used to optionally provide a small set of customization options to the web console. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConsoleCustomizationBrand { + #[serde(rename = "openshift")] + Openshift, + OpenShift, + #[serde(rename = "OKD")] + Okd, + Online, + #[serde(rename = "OCP")] + Ocp, + Dedicated, + Azure, + #[serde(rename = "ROSA")] + Rosa, +} + +/// customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a ConfigMap in the openshift-config namespace. This can be created with a command like 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. Recommended logo specifications: Dimensions: Max height of 68px and max width of 200px SVG format preferred +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomizationCustomLogoFile { + /// Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +/// developerCatalog allows to configure the shown developer catalog categories (filters) and types (sub-catalogs). +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomizationDeveloperCatalog { + /// categories which are shown in the developer catalog. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub categories: Option>, + /// types allows enabling or disabling of sub-catalog types that user can see in the Developer catalog. When omitted, all the sub-catalog types will be shown. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub types: Option, +} + +/// DeveloperConsoleCatalogCategory for the developer console catalog. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomizationDeveloperCatalogCategories { + /// ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. + pub id: String, + /// label defines a category display label. It is required and must have 1-64 characters. + pub label: String, + /// subcategories defines a list of child categories. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub subcategories: Option>, + /// tags is a list of strings that will match the category. A selected category show all items which has at least one overlapping tag between category and item. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tags: Option>, +} + +/// DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomizationDeveloperCatalogCategoriesSubcategories { + /// ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. + pub id: String, + /// label defines a category display label. It is required and must have 1-64 characters. + pub label: String, + /// tags is a list of strings that will match the category. A selected category show all items which has at least one overlapping tag between category and item. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tags: Option>, +} + +/// types allows enabling or disabling of sub-catalog types that user can see in the Developer catalog. When omitted, all the sub-catalog types will be shown. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomizationDeveloperCatalogTypes { + /// disabled is a list of developer catalog types (sub-catalogs IDs) that are not shown to users. 
Types (sub-catalogs) are added via console plugins, the available types (sub-catalog IDs) are available in the console on the cluster configuration page, or when editing the YAML in the console. Example: "Devfile", "HelmChart", "BuilderImage" If the list is empty or all the available sub-catalog types are added, then the complete developer catalog should be hidden. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub disabled: Option>, + /// enabled is a list of developer catalog types (sub-catalogs IDs) that will be shown to users. Types (sub-catalogs) are added via console plugins, the available types (sub-catalog IDs) are available in the console on the cluster configuration page, or when editing the YAML in the console. Example: "Devfile", "HelmChart", "BuilderImage" If the list is non-empty, a new type will not be shown to the user until it is added to list. If the list is empty the complete developer catalog will be shown. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub enabled: Option>, + /// state defines if a list of catalog types should be enabled or disabled. + pub state: ConsoleCustomizationDeveloperCatalogTypesState, +} + +/// types allows enabling or disabling of sub-catalog types that user can see in the Developer catalog. When omitted, all the sub-catalog types will be shown. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConsoleCustomizationDeveloperCatalogTypesState { + Enabled, + Disabled, +} + +/// Perspective defines a perspective that cluster admins want to show/hide in the perspective switcher dropdown +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomizationPerspectives { + /// id defines the id of the perspective. Example: "dev", "admin". The available perspective ids can be found in the code snippet section next to the yaml editor. Incorrect or unknown ids will be ignored. 
+ pub id: String, + /// pinnedResources defines the list of default pinned resources that users will see on the perspective navigation if they have not customized these pinned resources themselves. The list of available Kubernetes resources could be read via `kubectl api-resources`. The console will also provide a configuration UI and a YAML snippet that will list the available resources that can be pinned to the navigation. Incorrect or unknown resources will be ignored. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "pinnedResources")] + pub pinned_resources: Option>, + /// visibility defines the state of perspective along with access review checks if needed for that perspective. + pub visibility: ConsoleCustomizationPerspectivesVisibility, +} + +/// PinnedResourceReference includes the group, version and type of resource +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomizationPerspectivesPinnedResources { + /// group is the API Group of the Resource. Enter empty string for the core group. This value should consist of only lowercase alphanumeric characters, hyphens and periods. Example: "", "apps", "build.openshift.io", etc. + pub group: String, + /// resource is the type that is being referenced. It is normally the plural form of the resource kind in lowercase. This value should consist of only lowercase alphanumeric characters and hyphens. Example: "deployments", "deploymentconfigs", "pods", etc. + pub resource: String, + /// version is the API Version of the Resource. This value should consist of only lowercase alphanumeric characters. Example: "v1", "v1beta1", etc. + pub version: String, +} + +/// visibility defines the state of perspective along with access review checks if needed for that perspective. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomizationPerspectivesVisibility { + /// accessReview defines required and missing access review checks. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "accessReview")] + pub access_review: Option, + /// state defines the perspective is enabled or disabled or access review check is required. + pub state: ConsoleCustomizationPerspectivesVisibilityState, +} + +/// accessReview defines required and missing access review checks. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomizationPerspectivesVisibilityAccessReview { + /// missing defines a list of permission checks. The perspective will only be shown when at least one check fails. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the required access review list. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub missing: Option>, + /// required defines a list of permission checks. The perspective will only be shown when all checks are successful. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the missing access review list. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub required: Option>, +} + +/// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomizationPerspectivesVisibilityAccessReviewMissing { + /// Group is the API Group of the Resource. "*" means all. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces "" (empty) is defaulted for LocalSubjectAccessReviews "" (empty) is empty for cluster-scoped resources "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Resource is one of the existing resource types. "*" means all. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, + /// Subresource is one of the existing resource types. "" means none. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub subresource: Option, + /// Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub verb: Option, + /// Version is the API Version of the Resource. "*" means all. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomizationPerspectivesVisibilityAccessReviewRequired { + /// Group is the API Group of the Resource. "*" means all. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces "" (empty) is defaulted for LocalSubjectAccessReviews "" (empty) is empty for cluster-scoped resources "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Resource is one of the existing resource types. "*" means all. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, + /// Subresource is one of the existing resource types. "" means none. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub subresource: Option, + /// Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub verb: Option, + /// Version is the API Version of the Resource. "*" means all. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// visibility defines the state of perspective along with access review checks if needed for that perspective. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConsoleCustomizationPerspectivesVisibilityState { + Enabled, + Disabled, + AccessReview, +} + +/// projectAccess allows customizing the available list of ClusterRoles in the Developer perspective Project access page which can be used by a project admin to specify roles to other users and restrict access within the project. If set, the list will replace the default ClusterRole options. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomizationProjectAccess { + /// availableClusterRoles is the list of ClusterRole names that are assignable to users through the project access tab. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "availableClusterRoles")] + pub available_cluster_roles: Option>, +} + +/// quickStarts allows customization of available ConsoleQuickStart resources in console. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleCustomizationQuickStarts { + /// disabled is a list of ConsoleQuickStart resource names that are not shown to users. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub disabled: Option>, +} + +/// ConsoleSpec is the specification of the desired behavior of the Console. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConsoleLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// ConsoleSpec is the specification of the desired behavior of the Console. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ConsoleOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// providers contains configuration for using specific service providers. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleProviders { + /// statuspage contains ID for statuspage.io page that provides status info about. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub statuspage: Option, +} + +/// statuspage contains ID for statuspage.io page that provides status info about. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleProvidersStatuspage { + /// pageID is the unique ID assigned by Statuspage for your page. This must be a public page. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "pageID")] + pub page_id: Option, +} + +/// route contains hostname and secret reference that contains the serving certificate. If a custom route is specified, a new route will be created with the provided hostname, under which console will be available. 
In case of custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. In case of custom hostname points to an arbitrary domain, manual DNS configurations steps are necessary. The default console route will be maintained to reserve the default hostname for console if the custom route is removed. If not specified, default route will be used. DEPRECATED +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleRoute { + /// hostname is the desired custom domain under which console will be available. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hostname: Option, + /// secret points to secret in the openshift-config namespace that contains custom certificate and key and needs to be created manually by the cluster admin. Referenced Secret is required to contain following key value pairs: - "tls.crt" - to specifies custom certificate - "tls.key" - to specifies private key of the custom certificate If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub secret: Option, +} + +/// secret points to secret in the openshift-config namespace that contains custom certificate and key and needs to be created manually by the cluster admin. Referenced Secret is required to contain following key value pairs: - "tls.crt" - to specifies custom certificate - "tls.key" - to specifies private key of the custom certificate If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleRouteSecret { + /// name is the metadata.name of the referenced secret + pub name: String, +} + +/// ConsoleStatus defines the observed status of the Console. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsoleStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/csisnapshotcontrollers.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/csisnapshotcontrollers.rs new file mode 100644 index 000000000..1df762f1c --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/csisnapshotcontrollers.rs @@ -0,0 +1,113 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/csisnapshotcontrollers.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "CSISnapshotController", 
plural = "csisnapshotcontrollers")] +#[kube(status = "CSISnapshotControllerStatus")] +#[kube(schema = "disabled")] +pub struct CSISnapshotControllerSpec { + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +/// spec holds user settable values for configuration +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum CSISnapshotControllerLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// spec holds user settable values for configuration +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum CSISnapshotControllerOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct CSISnapshotControllerStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct CSISnapshotControllerStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct CSISnapshotControllerStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/dnses.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/dnses.rs new file mode 100644 index 
000000000..ab4ee0438 --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/dnses.rs @@ -0,0 +1,352 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/dnses.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec is the specification of the desired behavior of the DNS. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "DNS", plural = "dnses")] +#[kube(status = "DNSStatus")] +#[kube(schema = "disabled")] +pub struct DNSSpec { + /// cache describes the caching configuration that applies to all server blocks listed in the Corefile. This field allows a cluster admin to optionally configure: * positiveTTL which is a duration for which positive responses should be cached. * negativeTTL which is a duration for which negative responses should be cached. If this is not configured, OpenShift will configure positive and negative caching with a default value that is subject to change. At the time of writing, the default positiveTTL is 900 seconds and the default negativeTTL is 30 seconds or as noted in the respective Corefile for your version of OpenShift. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub cache: Option, + /// logLevel describes the desired logging verbosity for CoreDNS. Any one of the following values may be specified: * Normal logs errors from upstream resolvers. * Debug logs errors, NXDOMAIN responses, and NODATA responses. * Trace logs errors and all responses. Setting logLevel: Trace will produce extremely verbose logs. Valid values are: "Normal", "Debug", "Trace". Defaults to "Normal". 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether the DNS operator should manage cluster DNS + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// nodePlacement provides explicit control over the scheduling of DNS pods. + /// Generally, it is useful to run a DNS pod on every node so that DNS queries are always handled by a local DNS pod instead of going over the network to a DNS pod on another node. However, security policies may require restricting the placement of DNS pods to specific nodes. For example, if a security policy prohibits pods on arbitrary nodes from communicating with the API, a node selector can be specified to restrict DNS pods to nodes that are permitted to communicate with the API. Conversely, if running DNS pods on nodes with a particular taint is desired, a toleration can be specified for that taint. + /// If unset, defaults are used. See nodePlacement for more details. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodePlacement")] + pub node_placement: Option, + /// operatorLogLevel controls the logging level of the DNS Operator. Valid values are: "Normal", "Debug", "Trace". Defaults to "Normal". setting operatorLogLevel: Trace will produce extremely verbose logs. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// servers is a list of DNS resolvers that provide name query delegation for one or more subdomains outside the scope of the cluster domain. If servers consists of more than one Server, longest suffix match will be used to determine the Server. + /// For example, if there are two Servers, one for "foo.com" and another for "a.foo.com", and the name query is for "www.a.foo.com", it will be routed to the Server with Zone "a.foo.com". 
+ /// If this field is nil, no servers are created. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub servers: Option>, + /// upstreamResolvers defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers for the case of the default (".") server + /// If this field is not specified, the upstream used will default to /etc/resolv.conf, with policy "sequential" + #[serde(default, skip_serializing_if = "Option::is_none", rename = "upstreamResolvers")] + pub upstream_resolvers: Option, +} + +/// cache describes the caching configuration that applies to all server blocks listed in the Corefile. This field allows a cluster admin to optionally configure: * positiveTTL which is a duration for which positive responses should be cached. * negativeTTL which is a duration for which negative responses should be cached. If this is not configured, OpenShift will configure positive and negative caching with a default value that is subject to change. At the time of writing, the default positiveTTL is 900 seconds and the default negativeTTL is 30 seconds or as noted in the respective Corefile for your version of OpenShift. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSCache { + /// negativeTTL is optional and specifies the amount of time that a negative response should be cached. + /// If configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. "100s", "1m30s", "12h30m10s". Values that are fractions of a second are rounded down to the nearest second. If the configured value is less than 1s, the default value will be used. If not configured, the value will be 0s and OpenShift will use a default value of 30 seconds unless noted otherwise in the respective Corefile for your version of OpenShift. The default value of 30 seconds is subject to change. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "negativeTTL")] + pub negative_ttl: Option, + /// positiveTTL is optional and specifies the amount of time that a positive response should be cached. + /// If configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. "100s", "1m30s", "12h30m10s". Values that are fractions of a second are rounded down to the nearest second. If the configured value is less than 1s, the default value will be used. If not configured, the value will be 0s and OpenShift will use a default value of 900 seconds unless noted otherwise in the respective Corefile for your version of OpenShift. The default value of 900 seconds is subject to change. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "positiveTTL")] + pub positive_ttl: Option, +} + +/// spec is the specification of the desired behavior of the DNS. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DNSLogLevel { + Normal, + Debug, + Trace, +} + +/// nodePlacement provides explicit control over the scheduling of DNS pods. +/// Generally, it is useful to run a DNS pod on every node so that DNS queries are always handled by a local DNS pod instead of going over the network to a DNS pod on another node. However, security policies may require restricting the placement of DNS pods to specific nodes. For example, if a security policy prohibits pods on arbitrary nodes from communicating with the API, a node selector can be specified to restrict DNS pods to nodes that are permitted to communicate with the API. Conversely, if running DNS pods on nodes with a particular taint is desired, a toleration can be specified for that taint. +/// If unset, defaults are used. See nodePlacement for more details. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSNodePlacement { + /// nodeSelector is the node selector applied to DNS pods. + /// If empty, the default is used, which is currently the following: + /// kubernetes.io/os: linux + /// This default is subject to change. + /// If set, the specified selector is used and replaces the default. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeSelector")] + pub node_selector: Option>, + /// tolerations is a list of tolerations applied to DNS pods. + /// If empty, the DNS operator sets a toleration for the "node-role.kubernetes.io/master" taint. This default is subject to change. Specifying tolerations without including a toleration for the "node-role.kubernetes.io/master" taint may be risky as it could lead to an outage if all worker nodes become unavailable. + /// Note that the daemon controller adds some tolerations as well. See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, +} + +/// The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSNodePlacementTolerations { + /// Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub effect: Option, + /// Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + /// Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. 
Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + /// TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tolerationSeconds")] + pub toleration_seconds: Option, + /// Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +/// spec is the specification of the desired behavior of the DNS. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DNSOperatorLogLevel { + Normal, + Debug, + Trace, +} + +/// Server defines the schema for a server that runs per instance of CoreDNS. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSServers { + /// forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "forwardPlugin")] + pub forward_plugin: Option, + /// name is required and specifies a unique name for the server. Name must comply with the Service Name Syntax of rfc6335. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// zones is required and specifies the subdomains that Server is authoritative for. Zones must conform to the rfc1123 definition of a subdomain. Specifying the cluster domain (i.e., "cluster.local") is invalid. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub zones: Option>, +} + +/// forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSServersForwardPlugin { + /// policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified: + /// * "Random" picks a random upstream server for each query. * "RoundRobin" picks upstream servers in a round-robin order, moving to the next server for each new query. * "Sequential" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query. + /// The default value is "Random" + #[serde(default, skip_serializing_if = "Option::is_none")] + pub policy: Option, + /// protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are "TCP" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. "TCP" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. "TCP" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "protocolStrategy")] + pub protocol_strategy: Option, + /// transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver. 
+ /// The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "transportConfig")] + pub transport_config: Option, + /// upstreams is a list of resolvers to forward name queries for subdomains of Zones. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53. + /// A maximum of 15 upstreams is allowed per ForwardPlugin. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub upstreams: Option>, +} + +/// forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DNSServersForwardPluginPolicy { + Random, + RoundRobin, + Sequential, +} + +/// forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DNSServersForwardPluginProtocolStrategy { + #[serde(rename = "TCP")] + Tcp, + #[serde(rename = "")] + KopiumEmpty, +} + +/// transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver. +/// The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSServersForwardPluginTransportConfig { + /// tls contains the additional configuration options to use when Transport is set to "TLS". 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub tls: Option, + /// transport allows cluster administrators to opt-in to using a DNS-over-TLS connection between cluster DNS and an upstream resolver(s). Configuring TLS as the transport at this level without configuring a CABundle will result in the system certificates being used to verify the serving certificate of the upstream resolver(s). + /// Possible values: "" (empty) - This means no explicit choice has been made and the platform chooses the default which is subject to change over time. The current default is "Cleartext". "Cleartext" - Cluster admin specified cleartext option. This results in the same functionality as an empty value but may be useful when a cluster admin wants to be more explicit about the transport, or wants to switch from "TLS" to "Cleartext" explicitly. "TLS" - This indicates that DNS queries should be sent over a TLS connection. If Transport is set to TLS, you MUST also set ServerName. If a port is not included with the upstream IP, port 853 will be tried by default per RFC 7858 section 3.1; https://datatracker.ietf.org/doc/html/rfc7858#section-3.1. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub transport: Option, +} + +/// tls contains the additional configuration options to use when Transport is set to "TLS". +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSServersForwardPluginTransportConfigTls { + /// caBundle references a ConfigMap that must contain either a single CA Certificate or a CA Bundle. This allows cluster administrators to provide their own CA or CA bundle for validating the certificate of upstream resolvers. + /// 1. The configmap must contain a `ca-bundle.crt` key. 2. The value must be a PEM encoded CA certificate or CA bundle. 3. The administrator must create this configmap in the openshift-config namespace. 4. 
The upstream server certificate must contain a Subject Alternative Name (SAN) that matches ServerName. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "caBundle")] + pub ca_bundle: Option, + /// serverName is the upstream server to connect to when forwarding DNS queries. This is required when Transport is set to "TLS". ServerName will be validated against the DNS naming conventions in RFC 1123 and should match the TLS certificate installed in the upstream resolver(s). + #[serde(rename = "serverName")] + pub server_name: String, +} + +/// caBundle references a ConfigMap that must contain either a single CA Certificate or a CA Bundle. This allows cluster administrators to provide their own CA or CA bundle for validating the certificate of upstream resolvers. +/// 1. The configmap must contain a `ca-bundle.crt` key. 2. The value must be a PEM encoded CA certificate or CA bundle. 3. The administrator must create this configmap in the openshift-config namespace. 4. The upstream server certificate must contain a Subject Alternative Name (SAN) that matches ServerName. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSServersForwardPluginTransportConfigTlsCaBundle { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver. +/// The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DNSServersForwardPluginTransportConfigTransport { + #[serde(rename = "TLS")] + Tls, + Cleartext, + #[serde(rename = "")] + KopiumEmpty, +} + +/// upstreamResolvers defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers for the case of the default (".") server +/// If this field is not specified, the upstream used will default to /etc/resolv.conf, with policy "sequential" +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSUpstreamResolvers { + /// Policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified: + /// * "Random" picks a random upstream server for each query. * "RoundRobin" picks upstream servers in a round-robin order, moving to the next server for each new query. * "Sequential" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query. + /// The default value is "Sequential" + #[serde(default, skip_serializing_if = "Option::is_none")] + pub policy: Option, + /// protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are "TCP" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. "TCP" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. "TCP" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "protocolStrategy")] + pub protocol_strategy: Option, + /// transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver. + /// The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "transportConfig")] + pub transport_config: Option, + /// Upstreams is a list of resolvers to forward name queries for the "." domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. + /// A maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default + #[serde(default, skip_serializing_if = "Option::is_none")] + pub upstreams: Option>, +} + +/// upstreamResolvers defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers for the case of the default (".") server +/// If this field is not specified, the upstream used will default to /etc/resolv.conf, with policy "sequential" +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DNSUpstreamResolversPolicy { + Random, + RoundRobin, + Sequential, +} + +/// upstreamResolvers defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers for the case of the default (".") server +/// If this field is not specified, the upstream used will default to /etc/resolv.conf, with policy "sequential" +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DNSUpstreamResolversProtocolStrategy { + #[serde(rename = "TCP")] + Tcp, + #[serde(rename = "")] + KopiumEmpty, +} + +/// transportConfig is used to configure 
the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver. +/// The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSUpstreamResolversTransportConfig { + /// tls contains the additional configuration options to use when Transport is set to "TLS". + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tls: Option, + /// transport allows cluster administrators to opt-in to using a DNS-over-TLS connection between cluster DNS and an upstream resolver(s). Configuring TLS as the transport at this level without configuring a CABundle will result in the system certificates being used to verify the serving certificate of the upstream resolver(s). + /// Possible values: "" (empty) - This means no explicit choice has been made and the platform chooses the default which is subject to change over time. The current default is "Cleartext". "Cleartext" - Cluster admin specified cleartext option. This results in the same functionality as an empty value but may be useful when a cluster admin wants to be more explicit about the transport, or wants to switch from "TLS" to "Cleartext" explicitly. "TLS" - This indicates that DNS queries should be sent over a TLS connection. If Transport is set to TLS, you MUST also set ServerName. If a port is not included with the upstream IP, port 853 will be tried by default per RFC 7858 section 3.1; https://datatracker.ietf.org/doc/html/rfc7858#section-3.1. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub transport: Option, +} + +/// tls contains the additional configuration options to use when Transport is set to "TLS". 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSUpstreamResolversTransportConfigTls { + /// caBundle references a ConfigMap that must contain either a single CA Certificate or a CA Bundle. This allows cluster administrators to provide their own CA or CA bundle for validating the certificate of upstream resolvers. + /// 1. The configmap must contain a `ca-bundle.crt` key. 2. The value must be a PEM encoded CA certificate or CA bundle. 3. The administrator must create this configmap in the openshift-config namespace. 4. The upstream server certificate must contain a Subject Alternative Name (SAN) that matches ServerName. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "caBundle")] + pub ca_bundle: Option, + /// serverName is the upstream server to connect to when forwarding DNS queries. This is required when Transport is set to "TLS". ServerName will be validated against the DNS naming conventions in RFC 1123 and should match the TLS certificate installed in the upstream resolver(s). + #[serde(rename = "serverName")] + pub server_name: String, +} + +/// caBundle references a ConfigMap that must contain either a single CA Certificate or a CA Bundle. This allows cluster administrators to provide their own CA or CA bundle for validating the certificate of upstream resolvers. +/// 1. The configmap must contain a `ca-bundle.crt` key. 2. The value must be a PEM encoded CA certificate or CA bundle. 3. The administrator must create this configmap in the openshift-config namespace. 4. The upstream server certificate must contain a Subject Alternative Name (SAN) that matches ServerName. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSUpstreamResolversTransportConfigTlsCaBundle { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver. +/// The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DNSUpstreamResolversTransportConfigTransport { + #[serde(rename = "TLS")] + Tls, + Cleartext, + #[serde(rename = "")] + KopiumEmpty, +} + +/// Upstream can either be of type SystemResolvConf, or of type Network. +/// * For an Upstream of type SystemResolvConf, no further fields are necessary: The upstream will be configured to use /etc/resolv.conf. * For an Upstream of type Network, a NetworkResolver field needs to be defined with an IP address or IP:port if the upstream listens on a port other than 53. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSUpstreamResolversUpstreams { + /// Address must be defined when Type is set to Network. It will be ignored otherwise. It must be a valid ipv4 or ipv6 address. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub address: Option, + /// Port may be defined when Type is set to Network. It will be ignored otherwise. Port must be between 65535 + #[serde(default, skip_serializing_if = "Option::is_none")] + pub port: Option, + /// Type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. Type accepts 2 possible values: SystemResolvConf or Network. 
+ /// * When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined: /etc/resolv.conf will be used * When Network is used, the Upstream structure must contain at least an Address + #[serde(rename = "type")] + pub r#type: DNSUpstreamResolversUpstreamsType, +} + +/// Upstream can either be of type SystemResolvConf, or of type Network. +/// * For an Upstream of type SystemResolvConf, no further fields are necessary: The upstream will be configured to use /etc/resolv.conf. * For an Upstream of type Network, a NetworkResolver field needs to be defined with an IP address or IP:port if the upstream listens on a port other than 53. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum DNSUpstreamResolversUpstreamsType { + SystemResolvConf, + Network, + #[serde(rename = "")] + KopiumEmpty, +} + +/// status is the most recently observed status of the DNS. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSStatus { + /// clusterDomain is the local cluster DNS domain suffix for DNS services. This will be a subdomain as defined in RFC 1034, section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 Example: "cluster.local" + /// More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service + #[serde(rename = "clusterDomain")] + pub cluster_domain: String, + /// clusterIP is the service IP through which this DNS is made available. + /// In the case of the default DNS, this will be a well known IP that is used as the default nameserver for pods that are using the default ClusterFirst DNS policy. + /// In general, this IP can be specified in a pod's spec.dnsConfig.nameservers list or used explicitly when performing name resolution from within the cluster. 
Example: dig foo.com @ + /// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + #[serde(rename = "clusterIP")] + pub cluster_ip: String, + /// conditions provide information about the state of the DNS on the cluster. + /// These are the supported DNS conditions: + /// * Available - True if the following conditions are met: * DNS controller daemonset is available. - False if any of those conditions are unsatisfied. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, +} + +/// OperatorCondition is just the standard condition fields. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct DNSStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/etcds.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/etcds.rs new file mode 100644 index 000000000..3a84d0c55 --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/etcds.rs @@ -0,0 +1,170 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/etcds.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "Etcd", 
plural = "etcds")] +#[kube(status = "EtcdStatus")] +#[kube(schema = "disabled")] +pub struct EtcdSpec { + /// failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failedRevisionLimit")] + pub failed_revision_limit: Option, + /// forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "forceRedeploymentReason")] + pub force_redeployment_reason: Option, + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. 
+ /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + #[serde(default, skip_serializing_if = "Option::is_none", rename = "succeededRevisionLimit")] + pub succeeded_revision_limit: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum EtcdLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum EtcdOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct EtcdStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// ControlPlaneHardwareSpeed declares valid hardware speed tolerance levels + #[serde(default, skip_serializing_if = "Option::is_none", rename = "controlPlaneHardwareSpeed")] + pub control_plane_hardware_speed: Option, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// latestAvailableRevision is the deploymentID of the most recent deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestAvailableRevision")] + pub latest_available_revision: Option, + /// latestAvailableRevisionReason describe the detailed reason for the most recent deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestAvailableRevisionReason")] + pub latest_available_revision_reason: Option, + /// nodeStatuses track the deployment values and errors across individual nodes + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeStatuses")] + pub node_statuses: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct EtcdStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum EtcdStatusControlPlaneHardwareSpeed { + #[serde(rename = "")] + KopiumEmpty, + Standard, + Slower, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct EtcdStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + +/// NodeStatus provides information 
about the current state of a particular node managed by this operator. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct EtcdStatusNodeStatuses { + /// currentRevision is the generation of the most recently successful deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "currentRevision")] + pub current_revision: Option, + /// lastFailedCount is how often the installer pod of the last failed revision failed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedCount")] + pub last_failed_count: Option, + /// lastFailedReason is a machine readable failure reason string. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedReason")] + pub last_failed_reason: Option, + /// lastFailedRevision is the generation of the deployment we tried and failed to deploy. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedRevision")] + pub last_failed_revision: Option, + /// lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedRevisionErrors")] + pub last_failed_revision_errors: Option>, + /// lastFailedTime is the time the last failed revision failed the last time. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedTime")] + pub last_failed_time: Option, + /// lastFallbackCount is how often a fallback to a previous revision happened. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFallbackCount")] + pub last_fallback_count: Option, + /// nodeName is the name of the node + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeName")] + pub node_name: Option, + /// targetRevision is the generation of the deployment we're trying to apply + #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetRevision")] + pub target_revision: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/ingresscontrollers.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/ingresscontrollers.rs new file mode 100644 index 000000000..c9e7df549 --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/ingresscontrollers.rs @@ -0,0 +1,1507 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/ingresscontrollers.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec is the specification of the desired behavior of the IngressController. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "IngressController", plural = "ingresscontrollers")] +#[kube(namespaced)] +#[kube(status = "IngressControllerStatus")] +#[kube(schema = "disabled")] +pub struct IngressControllerSpec { + /// clientTLS specifies settings for requesting and verifying client certificates, which can be used to enable mutual TLS for edge-terminated and reencrypt routes. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientTLS")] + pub client_tls: Option, + /// defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. 
When Routes don't specify their own certificate, defaultCertificate is used. + /// The secret must contain the following keys and data: + /// tls.crt: certificate file contents tls.key: key file contents + /// If unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store. + /// If a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing. + /// The in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultCertificate")] + pub default_certificate: Option, + /// domain is a DNS name serviced by the ingress controller and is used to configure multiple features: + /// * For the LoadBalancerService endpoint publishing strategy, domain is used to configure DNS records. See endpointPublishingStrategy. + /// * When using a generated default certificate, the certificate will be valid for domain and its subdomains. See defaultCertificate. + /// * The value is published to individual Route statuses so that end-users know where to target external DNS records. + /// domain must be unique among all IngressControllers, and cannot be updated. + /// If empty, defaults to ingress.config.openshift.io/cluster .spec.domain. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub domain: Option, + /// endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc. 
+ /// If unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform: + /// AWS: LoadBalancerService (with External scope) Azure: LoadBalancerService (with External scope) GCP: LoadBalancerService (with External scope) IBMCloud: LoadBalancerService (with External scope) AlibabaCloud: LoadBalancerService (with External scope) Libvirt: HostNetwork + /// Any other platform types (including None) default to HostNetwork. + /// endpointPublishingStrategy cannot be updated. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "endpointPublishingStrategy")] + pub endpoint_publishing_strategy: Option, + /// httpCompression defines a policy for HTTP traffic compression. By default, there is no HTTP compression. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpCompression")] + pub http_compression: Option, + /// httpEmptyRequestsPolicy describes how HTTP connections should be handled if the connection times out before a request is received. Allowed values for this field are "Respond" and "Ignore". If the field is set to "Respond", the ingress controller sends an HTTP 400 or 408 response, logs the connection (if access logging is enabled), and counts the connection in the appropriate metrics. If the field is set to "Ignore", the ingress controller closes the connection without sending a response, logging the connection, or incrementing metrics. The default value is "Respond". + /// Typically, these connections come from load balancers' health probes or Web browsers' speculative connections ("preconnect") and can be safely ignored. However, these requests may also be caused by network errors, and so setting this field to "Ignore" may impede detection and diagnosis of problems. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpEmptyRequestsPolicy")] + pub http_empty_requests_policy: Option, + /// httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format "error-page-.http", where is an HTTP error code. For example, "error-page-503.http" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpErrorCodePages")] + pub http_error_code_pages: Option, + /// httpHeaders defines policy for HTTP headers. + /// If this field is empty, the default values are used. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpHeaders")] + pub http_headers: Option, + /// logging defines parameters for what should be logged where. If this field is empty, operational logs are enabled but access logs are disabled. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub logging: Option, + /// namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards. + /// If unset, the default is no filtering. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + /// nodePlacement enables explicit control over the scheduling of the ingress controller. + /// If unset, defaults are used. See NodePlacement for more details. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodePlacement")] + pub node_placement: Option, + /// replicas is the desired number of ingress controller replicas. If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status. + /// The value of replicas is set based on the value of a chosen field in the Infrastructure CR. If defaultPlacement is set to ControlPlane, the chosen field will be controlPlaneTopology. If it is set to Workers the chosen field will be infrastructureTopology. Replicas will then be set to 1 or 2 based whether the chosen field's value is SingleReplica or HighlyAvailable, respectively. + /// These defaults are subject to change. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub replicas: Option, + /// routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces). + /// If empty, defaults will be applied. See specific routeAdmission fields for details about their defaults. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "routeAdmission")] + pub route_admission: Option, + /// routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards. + /// If unset, the default is no filtering. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "routeSelector")] + pub route_selector: Option, + /// tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers. + /// If unset, the default is based on the apiservers.config.openshift.io/cluster resource. + /// Note that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. 
For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tlsSecurityProfile")] + pub tls_security_profile: Option, + /// tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details. + /// Setting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tuningOptions")] + pub tuning_options: Option, + /// unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +/// clientTLS specifies settings for requesting and verifying client certificates, which can be used to enable mutual TLS for edge-terminated and reencrypt routes. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerClientTls { + /// allowedSubjectPatterns specifies a list of regular expressions that should be matched against the distinguished name on a valid client certificate to filter requests. The regular expressions must use PCRE syntax. If this list is empty, no filtering is performed. If the list is nonempty, then at least one pattern must match a client certificate's distinguished name or else the ingress controller rejects the certificate and denies the connection. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowedSubjectPatterns")] + pub allowed_subject_patterns: Option>, + /// clientCA specifies a configmap containing the PEM-encoded CA certificate bundle that should be used to verify a client's certificate. The administrator must create this configmap in the openshift-config namespace. + #[serde(rename = "clientCA")] + pub client_ca: IngressControllerClientTlsClientCa, + /// clientCertificatePolicy specifies whether the ingress controller requires clients to provide certificates. This field accepts the values "Required" or "Optional". + /// Note that the ingress controller only checks client certificates for edge-terminated and reencrypt TLS routes; it cannot check certificates for cleartext HTTP or passthrough TLS routes. + #[serde(rename = "clientCertificatePolicy")] + pub client_certificate_policy: IngressControllerClientTlsClientCertificatePolicy, +} + +/// clientCA specifies a configmap containing the PEM-encoded CA certificate bundle that should be used to verify a client's certificate. The administrator must create this configmap in the openshift-config namespace. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerClientTlsClientCa { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// clientTLS specifies settings for requesting and verifying client certificates, which can be used to enable mutual TLS for edge-terminated and reencrypt routes. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerClientTlsClientCertificatePolicy { + #[serde(rename = "")] + KopiumEmpty, + Required, + Optional, +} + +/// defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used. 
+/// The secret must contain the following keys and data: +/// tls.crt: certificate file contents tls.key: key file contents +/// If unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store. +/// If a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing. +/// The in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerDefaultCertificate { + /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +/// endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc. +/// If unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform: +/// AWS: LoadBalancerService (with External scope) Azure: LoadBalancerService (with External scope) GCP: LoadBalancerService (with External scope) IBMCloud: LoadBalancerService (with External scope) AlibabaCloud: LoadBalancerService (with External scope) Libvirt: HostNetwork +/// Any other platform types (including None) default to HostNetwork. +/// endpointPublishingStrategy cannot be updated. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerEndpointPublishingStrategy { + /// hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostNetwork")] + pub host_network: Option, + /// loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "loadBalancer")] + pub load_balancer: Option, + /// nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodePort")] + pub node_port: Option, + /// private holds parameters for the Private endpoint publishing strategy. Present only if type is Private. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub private: Option, + /// type is the publishing strategy to use. Valid values are: + /// * LoadBalancerService + /// Publishes the ingress controller using a Kubernetes LoadBalancer Service. + /// In this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment. + /// See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + /// If domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone. + /// Wildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms. + /// * HostNetwork + /// Publishes the ingress controller on node ports where the ingress controller is deployed. + /// In this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. 
The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports. + /// * Private + /// Does not publish the ingress controller. + /// In this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller. + /// * NodePortService + /// Publishes the ingress controller using a Kubernetes NodePort Service. + /// In this configuration, the ingress controller deployment uses container networking. A NodePort Service is created to publish the deployment. The specific node ports are dynamically allocated by OpenShift; however, to support static port allocations, user changes to the node port field of the managed NodePort Service will preserved. + #[serde(rename = "type")] + pub r#type: IngressControllerEndpointPublishingStrategyType, +} + +/// hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerEndpointPublishingStrategyHostNetwork { + /// httpPort is the port on the host which should be used to listen for HTTP requests. This field should be set when port 80 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 80. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpPort")] + pub http_port: Option, + /// httpsPort is the port on the host which should be used to listen for HTTPS requests. This field should be set when port 443 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 443. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpsPort")] + pub https_port: Option, + /// protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. + /// PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. + /// The following values are valid for this field: + /// * The empty string. * "TCP". * "PROXY". + /// The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub protocol: Option, + /// statsPort is the port on the host where the stats from the router are published. The value should not coincide with the NodePort range of the cluster. If an external load balancer is configured to forward connections to this IngressController, the load balancer should use this port for health checks. The load balancer can send HTTP probes on this port on a given node, with the path /healthz/ready to determine if the ingress controller is ready to receive traffic on the node. For proper operation the load balancer must not forward traffic to a node until the health check reports ready. The load balancer should also stop forwarding requests within a maximum of 45 seconds after /healthz/ready starts reporting not-ready. 
Probing every 5 to 10 seconds, with a 5-second timeout and with a threshold of two successful or failed requests to become healthy or unhealthy respectively, are well-tested values. When the value is 0 or is not specified it defaults to 1936. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "statsPort")] + pub stats_port: Option, +} + +/// hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerEndpointPublishingStrategyHostNetworkProtocol { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "TCP")] + Tcp, + #[serde(rename = "PROXY")] + Proxy, +} + +/// loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerEndpointPublishingStrategyLoadBalancer { + /// allowedSourceRanges specifies an allowlist of IP address ranges to which access to the load balancer should be restricted. Each range must be specified using CIDR notation (e.g. "10.0.0.0/8" or "fd00::/8"). If no range is specified, "0.0.0.0/0" for IPv4 and "::/0" for IPv6 are used by default, which allows all source addresses. + /// To facilitate migration from earlier versions of OpenShift that did not have the allowedSourceRanges field, you may set the service.beta.kubernetes.io/load-balancer-source-ranges annotation on the "router-" service in the "openshift-ingress" namespace, and this annotation will take effect if allowedSourceRanges is empty on OpenShift 4.12. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowedSourceRanges")] + pub allowed_source_ranges: Option>, + /// dnsManagementPolicy indicates if the lifecycle of the wildcard DNS record associated with the load balancer service will be managed by the ingress operator. It defaults to Managed. 
Valid values are: Managed and Unmanaged. + #[serde(rename = "dnsManagementPolicy")] + pub dns_management_policy: IngressControllerEndpointPublishingStrategyLoadBalancerDnsManagementPolicy, + /// providerParameters holds desired load balancer information specific to the underlying infrastructure provider. + /// If empty, defaults will be applied. See specific providerParameters fields for details about their defaults. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerParameters")] + pub provider_parameters: Option, + /// scope indicates the scope at which the load balancer is exposed. Possible values are "External" and "Internal". + pub scope: IngressControllerEndpointPublishingStrategyLoadBalancerScope, +} + +/// loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerEndpointPublishingStrategyLoadBalancerDnsManagementPolicy { + Managed, + Unmanaged, +} + +/// providerParameters holds desired load balancer information specific to the underlying infrastructure provider. +/// If empty, defaults will be applied. See specific providerParameters fields for details about their defaults. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerEndpointPublishingStrategyLoadBalancerProviderParameters { + /// aws provides configuration settings that are specific to AWS load balancers. + /// If empty, defaults will be applied. See specific aws fields for details about their defaults. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub aws: Option, + /// gcp provides configuration settings that are specific to GCP load balancers. + /// If empty, defaults will be applied. See specific gcp fields for details about their defaults. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub gcp: Option, + /// ibm provides configuration settings that are specific to IBM Cloud load balancers. + /// If empty, defaults will be applied. See specific ibm fields for details about their defaults. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ibm: Option, + /// type is the underlying infrastructure provider for the load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix", "OpenStack", and "VSphere". + #[serde(rename = "type")] + pub r#type: IngressControllerEndpointPublishingStrategyLoadBalancerProviderParametersType, +} + +/// aws provides configuration settings that are specific to AWS load balancers. +/// If empty, defaults will be applied. See specific aws fields for details about their defaults. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerEndpointPublishingStrategyLoadBalancerProviderParametersAws { + /// classicLoadBalancerParameters holds configuration parameters for an AWS classic load balancer. Present only if type is Classic. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "classicLoadBalancer")] + pub classic_load_balancer: Option, + /// networkLoadBalancerParameters holds configuration parameters for an AWS network load balancer. Present only if type is NLB. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkLoadBalancer")] + pub network_load_balancer: Option, + /// type is the type of AWS load balancer to instantiate for an ingresscontroller. + /// Valid values are: + /// * "Classic": A Classic Load Balancer that makes routing decisions at either the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). 
See the following for additional details: + /// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb + /// * "NLB": A Network Load Balancer that makes routing decisions at the transport layer (TCP/SSL). See the following for additional details: + /// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb + #[serde(rename = "type")] + pub r#type: IngressControllerEndpointPublishingStrategyLoadBalancerProviderParametersAwsType, +} + +/// classicLoadBalancerParameters holds configuration parameters for an AWS classic load balancer. Present only if type is Classic. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerEndpointPublishingStrategyLoadBalancerProviderParametersAwsClassicLoadBalancer { + /// connectionIdleTimeout specifies the maximum time period that a connection may be idle before the load balancer closes the connection. The value must be parseable as a time duration value; see . A nil or zero value means no opinion, in which case a default value is used. The default value for this field is 60s. This default is subject to change. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "connectionIdleTimeout")] + pub connection_idle_timeout: Option, +} + +/// networkLoadBalancerParameters holds configuration parameters for an AWS network load balancer. Present only if type is NLB. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerEndpointPublishingStrategyLoadBalancerProviderParametersAwsNetworkLoadBalancer { +} + +/// aws provides configuration settings that are specific to AWS load balancers. +/// If empty, defaults will be applied. See specific aws fields for details about their defaults. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerEndpointPublishingStrategyLoadBalancerProviderParametersAwsType { + Classic, + #[serde(rename = "NLB")] + Nlb, +} + +/// gcp provides configuration settings that are specific to GCP load balancers. +/// If empty, defaults will be applied. See specific gcp fields for details about their defaults. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerEndpointPublishingStrategyLoadBalancerProviderParametersGcp { + /// clientAccess describes how client access is restricted for internal load balancers. + /// Valid values are: * "Global": Specifying an internal load balancer with Global client access allows clients from any region within the VPC to communicate with the load balancer. + /// https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access + /// * "Local": Specifying an internal load balancer with Local client access means only clients within the same region (and VPC) as the GCP load balancer can communicate with the load balancer. Note that this is the default behavior. + /// https://cloud.google.com/load-balancing/docs/internal#client_access + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientAccess")] + pub client_access: Option, +} + +/// gcp provides configuration settings that are specific to GCP load balancers. +/// If empty, defaults will be applied. See specific gcp fields for details about their defaults. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerEndpointPublishingStrategyLoadBalancerProviderParametersGcpClientAccess { + Global, + Local, +} + +/// ibm provides configuration settings that are specific to IBM Cloud load balancers. +/// If empty, defaults will be applied. See specific ibm fields for details about their defaults. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerEndpointPublishingStrategyLoadBalancerProviderParametersIbm { + /// protocol specifies whether the load balancer uses PROXY protocol to forward connections to the IngressController. See "service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features: "proxy-protocol"" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas" + /// PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. + /// Valid values for protocol are TCP, PROXY and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is TCP, without the proxy protocol enabled. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub protocol: Option, +} + +/// ibm provides configuration settings that are specific to IBM Cloud load balancers. +/// If empty, defaults will be applied. See specific ibm fields for details about their defaults. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerEndpointPublishingStrategyLoadBalancerProviderParametersIbmProtocol { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "TCP")] + Tcp, + #[serde(rename = "PROXY")] + Proxy, +} + +/// providerParameters holds desired load balancer information specific to the underlying infrastructure provider. +/// If empty, defaults will be applied. See specific providerParameters fields for details about their defaults. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerEndpointPublishingStrategyLoadBalancerProviderParametersType { + #[serde(rename = "AWS")] + Aws, + Azure, + BareMetal, + #[serde(rename = "GCP")] + Gcp, + Nutanix, + OpenStack, + VSphere, + #[serde(rename = "IBM")] + Ibm, +} + +/// loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerEndpointPublishingStrategyLoadBalancerScope { + Internal, + External, +} + +/// nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerEndpointPublishingStrategyNodePort { + /// protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. + /// PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. 
Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. + /// The following values are valid for this field: + /// * The empty string. * "TCP". * "PROXY". + /// The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub protocol: Option, +} + +/// nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerEndpointPublishingStrategyNodePortProtocol { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "TCP")] + Tcp, + #[serde(rename = "PROXY")] + Proxy, +} + +/// private holds parameters for the Private endpoint publishing strategy. Present only if type is Private. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerEndpointPublishingStrategyPrivate { + /// protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. + /// PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. 
See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. + /// The following values are valid for this field: + /// * The empty string. * "TCP". * "PROXY". + /// The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub protocol: Option, +} + +/// private holds parameters for the Private endpoint publishing strategy. Present only if type is Private. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerEndpointPublishingStrategyPrivateProtocol { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "TCP")] + Tcp, + #[serde(rename = "PROXY")] + Proxy, +} + +/// endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc. +/// If unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform: +/// AWS: LoadBalancerService (with External scope) Azure: LoadBalancerService (with External scope) GCP: LoadBalancerService (with External scope) IBMCloud: LoadBalancerService (with External scope) AlibabaCloud: LoadBalancerService (with External scope) Libvirt: HostNetwork +/// Any other platform types (including None) default to HostNetwork. +/// endpointPublishingStrategy cannot be updated. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerEndpointPublishingStrategyType { + LoadBalancerService, + HostNetwork, + Private, + NodePortService, +} + +/// httpCompression defines a policy for HTTP traffic compression. By default, there is no HTTP compression. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerHttpCompression { + /// mimeTypes is a list of MIME types that should have compression applied. This list can be empty, in which case the ingress controller does not apply compression. 
+ /// Note: Not all MIME types benefit from compression, but HAProxy will still use resources to try to compress if instructed to. Generally speaking, text (html, css, js, etc.) formats benefit from compression, but formats that are already compressed (image, audio, video, etc.) benefit little in exchange for the time and cpu spent on compressing again. See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2 + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mimeTypes")] + pub mime_types: Option>, +} + +/// spec is the specification of the desired behavior of the IngressController. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerHttpEmptyRequestsPolicy { + Respond, + Ignore, +} + +/// httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format "error-page-.http", where is an HTTP error code. For example, "error-page-503.http" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerHttpErrorCodePages { + /// name is the metadata.name of the referenced config map + pub name: String, +} + +/// httpHeaders defines policy for HTTP headers. +/// If this field is empty, the default values are used. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerHttpHeaders { + /// actions specifies options for modifying headers and their values. 
Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController's spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route's spec.httpHeaders.actions field. Headers set using this API cannot be captured for use in access logs. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub actions: Option, + /// forwardedHeaderPolicy specifies when and how the IngressController sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version HTTP headers. 
The value may be one of the following: + /// * "Append", which specifies that the IngressController appends the headers, preserving existing headers. + /// * "Replace", which specifies that the IngressController sets the headers, replacing any existing Forwarded or X-Forwarded-* headers. + /// * "IfNone", which specifies that the IngressController sets the headers if they are not already set. + /// * "Never", which specifies that the IngressController never sets the headers, preserving any existing headers. + /// By default, the policy is "Append". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "forwardedHeaderPolicy")] + pub forwarded_header_policy: Option, + /// headerNameCaseAdjustments specifies case adjustments that can be applied to HTTP header names. Each adjustment is specified as an HTTP header name with the desired capitalization. For example, specifying "X-Forwarded-For" indicates that the "x-forwarded-for" HTTP header should be adjusted to have the specified capitalization. + /// These adjustments are only applied to cleartext, edge-terminated, and re-encrypt routes, and only when using HTTP/1. + /// For request headers, these adjustments are applied only for routes that have the haproxy.router.openshift.io/h1-adjust-case=true annotation. For response headers, these adjustments are applied to all HTTP responses. + /// If this field is empty, no request headers are adjusted. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "headerNameCaseAdjustments")] + pub header_name_case_adjustments: Option>, + /// uniqueId describes configuration for a custom HTTP header that the ingress controller should inject into incoming HTTP requests. Typically, this header is configured to have a value that is unique to the HTTP request. The header can be used by applications or included in access logs to facilitate tracing individual HTTP requests. + /// If this field is empty, no such header is injected into requests. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "uniqueId")] + pub unique_id: Option, +} + +/// actions specifies options for modifying headers and their values. Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController's spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route's spec.httpHeaders.actions field. Headers set using this API cannot be captured for use in access logs. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerHttpHeadersActions { + /// request is a list of HTTP request headers to modify. Actions defined here will modify the request headers of all requests passing through an ingress controller. These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster. IngressController actions for request headers will be executed before Route actions. Currently, actions may define to either `Set` or `Delete` headers values. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. Sample fetchers allowed are "req.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". + #[serde(default, skip_serializing_if = "Option::is_none")] + pub request: Option>, + /// response is a list of HTTP response headers to modify. Actions defined here will modify the response headers of all requests passing through an ingress controller. These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster. IngressController actions for response headers will be executed after Route actions. Currently, actions may define to either `Set` or `Delete` headers values. Actions are applied in sequence as defined in this list. A maximum of 20 response header actions may be configured. Sample fetchers allowed are "res.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". + #[serde(default, skip_serializing_if = "Option::is_none")] + pub response: Option>, +} + +/// IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerHttpHeadersActionsRequest { + /// action specifies actions to perform on headers, such as setting or deleting headers. + pub action: IngressControllerHttpHeadersActionsRequestAction, + /// name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&'*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique. + pub name: String, +} + +/// action specifies actions to perform on headers, such as setting or deleting headers. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerHttpHeadersActionsRequestAction { + /// set specifies how the HTTP header should be set. This field is required when type is Set and forbidden otherwise. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub set: Option, + /// type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + #[serde(rename = "type")] + pub r#type: IngressControllerHttpHeadersActionsRequestActionType, +} + +/// set specifies how the HTTP header should be set. This field is required when type is Set and forbidden otherwise. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerHttpHeadersActionsRequestActionSet { + /// value specifies a header value. Dynamic values can be added. 
The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + pub value: String, +} + +/// action specifies actions to perform on headers, such as setting or deleting headers. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerHttpHeadersActionsRequestActionType { + Set, + Delete, +} + +/// IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerHttpHeadersActionsResponse { + /// action specifies actions to perform on headers, such as setting or deleting headers. + pub action: IngressControllerHttpHeadersActionsResponseAction, + /// name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&'*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique. + pub name: String, +} + +/// action specifies actions to perform on headers, such as setting or deleting headers. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerHttpHeadersActionsResponseAction { + /// set specifies how the HTTP header should be set. 
This field is required when type is Set and forbidden otherwise. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub set: Option, + /// type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + #[serde(rename = "type")] + pub r#type: IngressControllerHttpHeadersActionsResponseActionType, +} + +/// set specifies how the HTTP header should be set. This field is required when type is Set and forbidden otherwise. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerHttpHeadersActionsResponseActionSet { + /// value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + pub value: String, +} + +/// action specifies actions to perform on headers, such as setting or deleting headers. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerHttpHeadersActionsResponseActionType { + Set, + Delete, +} + +/// httpHeaders defines policy for HTTP headers. +/// If this field is empty, the default values are used. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerHttpHeadersForwardedHeaderPolicy { + Append, + Replace, + IfNone, + Never, +} + +/// uniqueId describes configuration for a custom HTTP header that the ingress controller should inject into incoming HTTP requests. Typically, this header is configured to have a value that is unique to the HTTP request. The header can be used by applications or included in access logs to facilitate tracing individual HTTP requests. +/// If this field is empty, no such header is injected into requests. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerHttpHeadersUniqueId { + /// format specifies the format for the injected HTTP header's value. This field has no effect unless name is specified. For the HAProxy-based ingress controller implementation, this format uses the same syntax as the HTTP log format. If the field is empty, the default value is "%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid"; see the corresponding HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 + #[serde(default, skip_serializing_if = "Option::is_none")] + pub format: Option, + /// name specifies the name of the HTTP header (for example, "unique-id") that the ingress controller should inject into HTTP requests. The field's value must be a valid HTTP header name as defined in RFC 2616 section 4.2. If the field is empty, no header is injected. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, +} + +/// logging defines parameters for what should be logged where. If this field is empty, operational logs are enabled but access logs are disabled. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerLogging { + /// access describes how the client requests should be logged. + /// If this field is empty, access logging is disabled. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub access: Option, +} + +/// access describes how the client requests should be logged. +/// If this field is empty, access logging is disabled. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerLoggingAccess { + /// destination is where access logs go. + pub destination: IngressControllerLoggingAccessDestination, + /// httpCaptureCookies specifies HTTP cookies that should be captured in access logs. If this field is empty, no cookies are captured. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpCaptureCookies")] + pub http_capture_cookies: Option>, + /// httpCaptureHeaders defines HTTP headers that should be captured in access logs. If this field is empty, no headers are captured. + /// Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be captured for TLS passthrough connections. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpCaptureHeaders")] + pub http_capture_headers: Option, + /// httpLogFormat specifies the format of the log message for an HTTP request. + /// If this field is empty, log messages use the implementation's default HTTP log format. For HAProxy's default HTTP log format, see the HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 + /// Note that this format only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). It does not affect the log format for TLS passthrough connections. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpLogFormat")] + pub http_log_format: Option, + /// logEmptyRequests specifies how connections on which no request is received should be logged. Typically, these empty requests come from load balancers' health probes or Web browsers' speculative connections ("preconnect"), in which case logging these requests may be undesirable. However, these requests may also be caused by network errors, in which case logging empty requests may be useful for diagnosing the errors. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts. Allowed values for this field are "Log" and "Ignore". The default value is "Log". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logEmptyRequests")] + pub log_empty_requests: Option, +} + +/// destination is where access logs go. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerLoggingAccessDestination { + /// container holds parameters for the Container logging destination. Present only if type is Container. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub container: Option, + /// syslog holds parameters for a syslog endpoint. Present only if type is Syslog. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub syslog: Option, + /// type is the type of destination for logs. It must be one of the following: + /// * Container + /// The ingress operator configures the sidecar container named "logs" on the ingress controller pod and configures the ingress controller to write logs to the sidecar. The logs are then available as container logs. The expectation is that the administrator configures a custom logging solution that reads logs from this sidecar. Note that using container logs means that logs may be dropped if the rate of logs exceeds the container runtime's or the custom logging solution's capacity. 
+ /// * Syslog + /// Logs are sent to a syslog endpoint. The administrator must specify an endpoint that can receive syslog messages. The expectation is that the administrator has configured a custom syslog instance. + #[serde(rename = "type")] + pub r#type: IngressControllerLoggingAccessDestinationType, +} + +/// container holds parameters for the Container logging destination. Present only if type is Container. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerLoggingAccessDestinationContainer { + /// maxLength is the maximum length of the log message. + /// Valid values are integers in the range 480 to 8192, inclusive. + /// When omitted, the default value is 1024. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxLength")] + pub max_length: Option, +} + +/// syslog holds parameters for a syslog endpoint. Present only if type is Syslog. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerLoggingAccessDestinationSyslog { + /// address is the IP address of the syslog endpoint that receives log messages. + pub address: String, + /// facility specifies the syslog facility of log messages. + /// If this field is empty, the facility is "local1". + #[serde(default, skip_serializing_if = "Option::is_none")] + pub facility: Option, + /// maxLength is the maximum length of the log message. + /// Valid values are integers in the range 480 to 4096, inclusive. + /// When omitted, the default value is 1024. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxLength")] + pub max_length: Option, + /// port is the UDP port number of the syslog endpoint that receives log messages. + pub port: i32, +} + +/// syslog holds parameters for a syslog endpoint. Present only if type is Syslog. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerLoggingAccessDestinationSyslogFacility { + #[serde(rename = "kern")] + Kern, + #[serde(rename = "user")] + User, + #[serde(rename = "mail")] + Mail, + #[serde(rename = "daemon")] + Daemon, + #[serde(rename = "auth")] + Auth, + #[serde(rename = "syslog")] + Syslog, + #[serde(rename = "lpr")] + Lpr, + #[serde(rename = "news")] + News, + #[serde(rename = "uucp")] + Uucp, + #[serde(rename = "cron")] + Cron, + #[serde(rename = "auth2")] + Auth2, + #[serde(rename = "ftp")] + Ftp, + #[serde(rename = "ntp")] + Ntp, + #[serde(rename = "audit")] + Audit, + #[serde(rename = "alert")] + Alert, + #[serde(rename = "cron2")] + Cron2, + #[serde(rename = "local0")] + Local0, + #[serde(rename = "local1")] + Local1, + #[serde(rename = "local2")] + Local2, + #[serde(rename = "local3")] + Local3, + #[serde(rename = "local4")] + Local4, + #[serde(rename = "local5")] + Local5, + #[serde(rename = "local6")] + Local6, + #[serde(rename = "local7")] + Local7, +} + +/// destination is where access logs go. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerLoggingAccessDestinationType { + Container, + Syslog, +} + +/// IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be captured. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerLoggingAccessHttpCaptureCookies { + /// matchType specifies the type of match to be performed on the cookie name. Allowed values are "Exact" for an exact string match and "Prefix" for a string prefix match. If "Exact" is specified, a name must be specified in the name field. If "Prefix" is provided, a prefix must be specified in the namePrefix field. For example, specifying matchType "Prefix" and namePrefix "foo" will capture a cookie named "foo" or "foobar" but not one named "bar". The first matching cookie is captured. 
+ #[serde(rename = "matchType")] + pub match_type: IngressControllerLoggingAccessHttpCaptureCookiesMatchType, + /// maxLength specifies a maximum length of the string that will be logged, which includes the cookie name, cookie value, and one-character delimiter. If the log entry exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request. + #[serde(rename = "maxLength")] + pub max_length: i64, + /// name specifies a cookie name. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namePrefix specifies a cookie name prefix. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namePrefix")] + pub name_prefix: Option, +} + +/// IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be captured. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerLoggingAccessHttpCaptureCookiesMatchType { + Exact, + Prefix, +} + +/// httpCaptureHeaders defines HTTP headers that should be captured in access logs. If this field is empty, no headers are captured. +/// Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be captured for TLS passthrough connections. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerLoggingAccessHttpCaptureHeaders { + /// request specifies which HTTP request headers to capture. + /// If this field is empty, no request headers are captured. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub request: Option>, + /// response specifies which HTTP response headers to capture. + /// If this field is empty, no response headers are captured. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub response: Option>, +} + +/// IngressControllerCaptureHTTPHeader describes an HTTP header that should be captured. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerLoggingAccessHttpCaptureHeadersRequest { + /// maxLength specifies a maximum length for the header value. If a header value exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request. + #[serde(rename = "maxLength")] + pub max_length: i64, + /// name specifies a header name. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. + pub name: String, +} + +/// IngressControllerCaptureHTTPHeader describes an HTTP header that should be captured. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerLoggingAccessHttpCaptureHeadersResponse { + /// maxLength specifies a maximum length for the header value. If a header value exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request. + #[serde(rename = "maxLength")] + pub max_length: i64, + /// name specifies a header name. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. + pub name: String, +} + +/// access describes how the client requests should be logged. +/// If this field is empty, access logging is disabled. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerLoggingAccessLogEmptyRequests { + Log, + Ignore, +} + +/// namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards. +/// If unset, the default is no filtering. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerNamespaceSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerNamespaceSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// nodePlacement enables explicit control over the scheduling of the ingress controller. +/// If unset, defaults are used. See NodePlacement for more details. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerNodePlacement { + /// nodeSelector is the node selector applied to ingress controller deployments. + /// If set, the specified selector is used and replaces the default. + /// If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status. + /// When defaultPlacement is Workers, the default is: + /// kubernetes.io/os: linux node-role.kubernetes.io/worker: '' + /// When defaultPlacement is ControlPlane, the default is: + /// kubernetes.io/os: linux node-role.kubernetes.io/master: '' + /// These defaults are subject to change. + /// Note that using nodeSelector.matchExpressions is not supported. Only nodeSelector.matchLabels may be used. This is a limitation of the Kubernetes API: the pod spec does not allow complex expressions for node selectors. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeSelector")] + pub node_selector: Option, + /// tolerations is a list of tolerations applied to ingress controller deployments. + /// The default is an empty list. + /// See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tolerations: Option>, +} + +/// nodeSelector is the node selector applied to ingress controller deployments. +/// If set, the specified selector is used and replaces the default. +/// If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status. +/// When defaultPlacement is Workers, the default is: +/// kubernetes.io/os: linux node-role.kubernetes.io/worker: '' +/// When defaultPlacement is ControlPlane, the default is: +/// kubernetes.io/os: linux node-role.kubernetes.io/master: '' +/// These defaults are subject to change. +/// Note that using nodeSelector.matchExpressions is not supported. 
Only nodeSelector.matchLabels may be used. This is a limitation of the Kubernetes API: the pod spec does not allow complex expressions for node selectors. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerNodePlacementNodeSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerNodePlacementNodeSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerNodePlacementTolerations { + /// Effect indicates the taint effect to match. 
Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub effect: Option, + /// Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + /// Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub operator: Option, + /// TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tolerationSeconds")] + pub toleration_seconds: Option, + /// Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +/// routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces). +/// If empty, defaults will be applied. See specific routeAdmission fields for details about their defaults. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerRouteAdmission { + /// namespaceOwnership describes how host name claims across namespaces should be handled. 
+    /// Value must be one of: +    /// - Strict: Do not allow routes in different namespaces to claim the same host. +    /// - InterNamespaceAllowed: Allow routes to claim different paths of the same host name across namespaces. +    /// If empty, the default is Strict. +    #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceOwnership")] +    pub namespace_ownership: Option<IngressControllerRouteAdmissionNamespaceOwnership>, +    /// wildcardPolicy describes how routes with wildcard policies should be handled for the ingress controller. WildcardPolicy controls use of routes [1] exposed by the ingress controller based on the route's wildcard policy. +    /// [1] https://github.com/openshift/api/blob/master/route/v1/types.go +    /// Note: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed will cause admitted routes with a wildcard policy of Subdomain to stop working. These routes must be updated to a wildcard policy of None to be readmitted by the ingress controller. +    /// WildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values. +    /// If empty, defaults to "WildcardsDisallowed". +    #[serde(default, skip_serializing_if = "Option::is_none", rename = "wildcardPolicy")] +    pub wildcard_policy: Option<IngressControllerRouteAdmissionWildcardPolicy>, +} + +/// routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces). +/// If empty, defaults will be applied. See specific routeAdmission fields for details about their defaults. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerRouteAdmissionNamespaceOwnership { +    InterNamespaceAllowed, +    Strict, +} + +/// routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces). +/// If empty, defaults will be applied. See specific routeAdmission fields for details about their defaults. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerRouteAdmissionWildcardPolicy { +    WildcardsAllowed, +    WildcardsDisallowed, +} + +/// routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards. +/// If unset, the default is no filtering. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerRouteSelector { +    /// matchExpressions is a list of label selector requirements. The requirements are ANDed. +    #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] +    pub match_expressions: Option<Vec<IngressControllerRouteSelectorMatchExpressions>>, +    /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. +    #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] +    pub match_labels: Option<BTreeMap<String, String>>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerRouteSelectorMatchExpressions { +    /// key is the label key that the selector applies to. +    pub key: String, +    /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. +    pub operator: String, +    /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. +    #[serde(default, skip_serializing_if = "Option::is_none")] +    pub values: Option<Vec<String>>, +} + +/// tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers. 
+/// If unset, the default is based on the apiservers.config.openshift.io/cluster resource. +/// Note that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerTlsSecurityProfile { + /// custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: + /// ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1 + #[serde(default, skip_serializing_if = "Option::is_none")] + pub custom: Option, + /// intermediate is a TLS security profile based on: + /// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 + /// and looks like this (yaml): + /// ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2 + #[serde(default, skip_serializing_if = "Option::is_none")] + pub intermediate: Option, + /// modern is a TLS security profile based on: + /// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + /// and looks like this (yaml): + /// ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 + /// NOTE: Currently unsupported. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub modern: Option, + /// old is a TLS security profile based on: + /// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + /// and looks like this (yaml): + /// ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0 + #[serde(default, skip_serializing_if = "Option::is_none")] + pub old: Option, + /// type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on: + /// https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations + /// The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced. + /// Note that the Modern profile is currently not supported because it is not yet well adopted by common software libraries. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. 
An example custom profile looks like this: +/// ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1 +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerTlsSecurityProfileCustom { +    /// ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml): +    /// ciphers: - DES-CBC3-SHA +    #[serde(default, skip_serializing_if = "Option::is_none")] +    pub ciphers: Option<Vec<String>>, +    /// minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): +    /// minTLSVersion: TLSv1.1 +    /// NOTE: currently the highest minTLSVersion allowed is VersionTLS12 +    #[serde(default, skip_serializing_if = "Option::is_none", rename = "minTLSVersion")] +    pub min_tls_version: Option<IngressControllerTlsSecurityProfileCustomMinTlsVersion>, +} + +/// custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. 
An example custom profile looks like this: +/// ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1 +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerTlsSecurityProfileCustomMinTlsVersion { + #[serde(rename = "VersionTLS10")] + VersionTls10, + #[serde(rename = "VersionTLS11")] + VersionTls11, + #[serde(rename = "VersionTLS12")] + VersionTls12, + #[serde(rename = "VersionTLS13")] + VersionTls13, +} + +/// intermediate is a TLS security profile based on: +/// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 +/// and looks like this (yaml): +/// ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2 +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerTlsSecurityProfileIntermediate { +} + +/// modern is a TLS security profile based on: +/// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility +/// and looks like this (yaml): +/// ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 +/// NOTE: Currently unsupported. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerTlsSecurityProfileModern { +} + +/// old is a TLS security profile based on: +/// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility +/// and looks like this (yaml): +/// ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0 +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerTlsSecurityProfileOld { +} + +/// tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers. +/// If unset, the default is based on the apiservers.config.openshift.io/cluster resource. +/// Note that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerTlsSecurityProfileType { + Old, + Intermediate, + Modern, + Custom, +} + +/// tuningOptions defines parameters for adjusting the performance of ingress controller pods. 
All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details. +/// Setting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerTuningOptions { + /// clientFinTimeout defines how long a connection will be held open while waiting for the client response to the server/backend closing the connection. + /// If unset, the default timeout is 1s + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientFinTimeout")] + pub client_fin_timeout: Option, + /// clientTimeout defines how long a connection will be held open while waiting for a client response. + /// If unset, the default timeout is 30s + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientTimeout")] + pub client_timeout: Option, + /// headerBufferBytes describes how much memory should be reserved (in bytes) for IngressController connection sessions. Note that this value must be at least 16384 if HTTP/2 is enabled for the IngressController (https://tools.ietf.org/html/rfc7540). If this field is empty, the IngressController will use a default value of 32768 bytes. + /// Setting this field is generally not recommended as headerBufferBytes values that are too small may break the IngressController and headerBufferBytes values that are too large could cause the IngressController to use significantly more memory than necessary. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "headerBufferBytes")] + pub header_buffer_bytes: Option, + /// headerBufferMaxRewriteBytes describes how much memory should be reserved (in bytes) from headerBufferBytes for HTTP header rewriting and appending for IngressController connection sessions. 
Note that incoming HTTP requests will be limited to (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning headerBufferBytes must be greater than headerBufferMaxRewriteBytes. If this field is empty, the IngressController will use a default value of 8192 bytes. + /// Setting this field is generally not recommended as headerBufferMaxRewriteBytes values that are too small may break the IngressController and headerBufferMaxRewriteBytes values that are too large could cause the IngressController to use significantly more memory than necessary. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "headerBufferMaxRewriteBytes")] + pub header_buffer_max_rewrite_bytes: Option, + /// healthCheckInterval defines how long the router waits between two consecutive health checks on its configured backends. This value is applied globally as a default for all routes, but may be overridden per-route by the route annotation "router.openshift.io/haproxy.health.check.interval". + /// Expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs" U+00B5 or "μs" U+03BC), "ms", "s", "m", "h". + /// Setting this to less than 5s can cause excess traffic due to too frequent TCP health checks and accompanying SYN packet storms. Alternatively, setting this too high can result in increased latency, due to backend servers that are no longer available, but haven't yet been detected as such. + /// An empty or zero healthCheckInterval means no opinion and IngressController chooses a default, which is subject to change over time. Currently the default healthCheckInterval value is 5s. + /// Currently the minimum allowed value is 1s and the maximum allowed value is 2147483647ms (24.85 days). Both are subject to change over time. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "healthCheckInterval")] + pub health_check_interval: Option, + /// maxConnections defines the maximum number of simultaneous connections that can be established per HAProxy process. Increasing this value allows each ingress controller pod to handle more connections but at the cost of additional system resources being consumed. + /// Permitted values are: empty, 0, -1, and the range 2000-2000000. + /// If this field is empty or 0, the IngressController will use the default value of 50000, but the default is subject to change in future releases. + /// If the value is -1 then HAProxy will dynamically compute a maximum value based on the available ulimits in the running container. Selecting -1 (i.e., auto) will result in a large value being computed (~520000 on OpenShift >=4.10 clusters) and therefore each HAProxy process will incur significant memory usage compared to the current default of 50000. + /// Setting a value that is greater than the current operating system limit will prevent the HAProxy process from starting. + /// If you choose a discrete value (e.g., 750000) and the router pod is migrated to a new node, there's no guarantee that that new node has identical ulimits configured. In such a scenario the pod would fail to start. If you have nodes with different ulimits configured (e.g., different tuned profiles) and you choose a discrete value then the guidance is to use -1 and let the value be computed dynamically at runtime. + /// You can monitor memory usage for router containers with the following metric: 'container_memory_working_set_bytes{container="router",namespace="openshift-ingress"}'. + /// You can monitor memory usage of individual HAProxy processes in router containers with the following metric: 'container_memory_working_set_bytes{container="router",namespace="openshift-ingress"}/container_processes{container="router",namespace="openshift-ingress"}'. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxConnections")] + pub max_connections: Option, + /// reloadInterval defines the minimum interval at which the router is allowed to reload to accept new changes. Increasing this value can prevent the accumulation of HAProxy processes, depending on the scenario. Increasing this interval can also lessen load imbalance on a backend's servers when using the roundrobin balancing algorithm. Alternatively, decreasing this value may decrease latency since updates to HAProxy's configuration can take effect more quickly. + /// The value must be a time duration value; see . Currently, the minimum value allowed is 1s, and the maximum allowed value is 120s. Minimum and maximum allowed values may change in future versions of OpenShift. Note that if a duration outside of these bounds is provided, the value of reloadInterval will be capped/floored and not rejected (e.g. a duration of over 120s will be capped to 120s; the IngressController will not reject and replace this disallowed value with the default). + /// A zero value for reloadInterval tells the IngressController to choose the default, which is currently 5s and subject to change without notice. + /// This field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs" U+00B5 or "μs" U+03BC), "ms", "s", "m", "h". + /// Note: Setting a value significantly larger than the default of 5s can cause latency in observing updates to routes and their endpoints. HAProxy's configuration will be reloaded less frequently, and newly created routes will not be served until the subsequent reload. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "reloadInterval")] + pub reload_interval: Option, + /// serverFinTimeout defines how long a connection will be held open while waiting for the server/backend response to the client closing the connection. + /// If unset, the default timeout is 1s + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serverFinTimeout")] + pub server_fin_timeout: Option, + /// serverTimeout defines how long a connection will be held open while waiting for a server/backend response. + /// If unset, the default timeout is 30s + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serverTimeout")] + pub server_timeout: Option, + /// threadCount defines the number of threads created per HAProxy process. Creating more threads allows each ingress controller pod to handle more connections, at the cost of more system resources being used. HAProxy currently supports up to 64 threads. If this field is empty, the IngressController will use the default value. The current default is 4 threads, but this may change in future releases. + /// Setting this field is generally not recommended. Increasing the number of HAProxy threads allows ingress controller pods to utilize more CPU time under load, potentially starving other pods if set too high. Reducing the number of threads may cause the ingress controller to perform poorly. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "threadCount")] + pub thread_count: Option, + /// tlsInspectDelay defines how long the router can hold data to find a matching route. + /// Setting this too short can cause the router to fall back to the default certificate for edge-terminated or reencrypt routes even when a better matching certificate could be used. 
+ /// If unset, the default inspect delay is 5s + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tlsInspectDelay")] + pub tls_inspect_delay: Option, + /// tunnelTimeout defines how long a tunnel connection (including websockets) will be held open while the tunnel is idle. + /// If unset, the default timeout is 1h + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tunnelTimeout")] + pub tunnel_timeout: Option, +} + +/// status is the most recently observed status of the IngressController. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatus { + /// availableReplicas is number of observed available replicas according to the ingress controller deployment. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "availableReplicas")] + pub available_replicas: Option, + /// conditions is a list of conditions and their status. + /// Available means the ingress controller deployment is available and servicing route and ingress resources (i.e, .status.availableReplicas equals .spec.replicas) + /// There are additional conditions which indicate the status of other ingress controller features and capabilities. + /// * LoadBalancerManaged - True if the following conditions are met: * The endpoint publishing strategy requires a service load balancer. - False if any of those conditions are unsatisfied. + /// * LoadBalancerReady - True if the following conditions are met: * A load balancer is managed. * The load balancer is ready. - False if any of those conditions are unsatisfied. + /// * DNSManaged - True if the following conditions are met: * The endpoint publishing strategy and platform support DNS. * The ingress controller domain is set. * dns.config.openshift.io/cluster configures DNS zones. - False if any of those conditions are unsatisfied. + /// * DNSReady - True if the following conditions are met: * DNS is managed. * DNS records have been successfully created. 
- False if any of those conditions are unsatisfied. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// domain is the actual domain in use. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub domain: Option, + /// endpointPublishingStrategy is the actual strategy in use. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "endpointPublishingStrategy")] + pub endpoint_publishing_strategy: Option, + /// namespaceSelector is the actual namespaceSelector in use. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] + pub namespace_selector: Option, + /// observedGeneration is the most recent generation observed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// routeSelector is the actual routeSelector in use. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "routeSelector")] + pub route_selector: Option, + /// selector is a label selector, in string format, for ingress controller pods corresponding to the IngressController. The number of matching pods should equal the value of availableReplicas. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub selector: Option, + /// tlsProfile is the TLS connection configuration that is in effect. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "tlsProfile")] + pub tls_profile: Option, +} + +/// OperatorCondition is just the standard condition fields. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// endpointPublishingStrategy is the actual strategy in use. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusEndpointPublishingStrategy { + /// hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostNetwork")] + pub host_network: Option, + /// loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "loadBalancer")] + pub load_balancer: Option, + /// nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodePort")] + pub node_port: Option, + /// private holds parameters for the Private endpoint publishing strategy. Present only if type is Private. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub private: Option, + /// type is the publishing strategy to use. Valid values are: + /// * LoadBalancerService + /// Publishes the ingress controller using a Kubernetes LoadBalancer Service. + /// In this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment. 
+ /// See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + /// If domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone. + /// Wildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms. + /// * HostNetwork + /// Publishes the ingress controller on node ports where the ingress controller is deployed. + /// In this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports. + /// * Private + /// Does not publish the ingress controller. + /// In this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller. + /// * NodePortService + /// Publishes the ingress controller using a Kubernetes NodePort Service. + /// In this configuration, the ingress controller deployment uses container networking. A NodePort Service is created to publish the deployment. The specific node ports are dynamically allocated by OpenShift; however, to support static port allocations, user changes to the node port field of the managed NodePort Service will preserved. + #[serde(rename = "type")] + pub r#type: IngressControllerStatusEndpointPublishingStrategyType, +} + +/// hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusEndpointPublishingStrategyHostNetwork { + /// httpPort is the port on the host which should be used to listen for HTTP requests. This field should be set when port 80 is already in use. 
The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 80. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpPort")] + pub http_port: Option, + /// httpsPort is the port on the host which should be used to listen for HTTPS requests. This field should be set when port 443 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 443. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpsPort")] + pub https_port: Option, + /// protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. + /// PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. + /// The following values are valid for this field: + /// * The empty string. * "TCP". * "PROXY". + /// The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub protocol: Option, + /// statsPort is the port on the host where the stats from the router are published. The value should not coincide with the NodePort range of the cluster. 
If an external load balancer is configured to forward connections to this IngressController, the load balancer should use this port for health checks. The load balancer can send HTTP probes on this port on a given node, with the path /healthz/ready to determine if the ingress controller is ready to receive traffic on the node. For proper operation the load balancer must not forward traffic to a node until the health check reports ready. The load balancer should also stop forwarding requests within a maximum of 45 seconds after /healthz/ready starts reporting not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with a threshold of two successful or failed requests to become healthy or unhealthy respectively, are well-tested values. When the value is 0 or is not specified it defaults to 1936. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "statsPort")] + pub stats_port: Option, +} + +/// hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerStatusEndpointPublishingStrategyHostNetworkProtocol { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "TCP")] + Tcp, + #[serde(rename = "PROXY")] + Proxy, +} + +/// loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusEndpointPublishingStrategyLoadBalancer { + /// allowedSourceRanges specifies an allowlist of IP address ranges to which access to the load balancer should be restricted. Each range must be specified using CIDR notation (e.g. "10.0.0.0/8" or "fd00::/8"). If no range is specified, "0.0.0.0/0" for IPv4 and "::/0" for IPv6 are used by default, which allows all source addresses. 
+ /// To facilitate migration from earlier versions of OpenShift that did not have the allowedSourceRanges field, you may set the service.beta.kubernetes.io/load-balancer-source-ranges annotation on the "router-" service in the "openshift-ingress" namespace, and this annotation will take effect if allowedSourceRanges is empty on OpenShift 4.12. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowedSourceRanges")] + pub allowed_source_ranges: Option>, + /// dnsManagementPolicy indicates if the lifecycle of the wildcard DNS record associated with the load balancer service will be managed by the ingress operator. It defaults to Managed. Valid values are: Managed and Unmanaged. + #[serde(rename = "dnsManagementPolicy")] + pub dns_management_policy: IngressControllerStatusEndpointPublishingStrategyLoadBalancerDnsManagementPolicy, + /// providerParameters holds desired load balancer information specific to the underlying infrastructure provider. + /// If empty, defaults will be applied. See specific providerParameters fields for details about their defaults. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerParameters")] + pub provider_parameters: Option, + /// scope indicates the scope at which the load balancer is exposed. Possible values are "External" and "Internal". + pub scope: IngressControllerStatusEndpointPublishingStrategyLoadBalancerScope, +} + +/// loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerStatusEndpointPublishingStrategyLoadBalancerDnsManagementPolicy { + Managed, + Unmanaged, +} + +/// providerParameters holds desired load balancer information specific to the underlying infrastructure provider. +/// If empty, defaults will be applied. See specific providerParameters fields for details about their defaults. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusEndpointPublishingStrategyLoadBalancerProviderParameters { + /// aws provides configuration settings that are specific to AWS load balancers. + /// If empty, defaults will be applied. See specific aws fields for details about their defaults. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub aws: Option, + /// gcp provides configuration settings that are specific to GCP load balancers. + /// If empty, defaults will be applied. See specific gcp fields for details about their defaults. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gcp: Option, + /// ibm provides configuration settings that are specific to IBM Cloud load balancers. + /// If empty, defaults will be applied. See specific ibm fields for details about their defaults. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ibm: Option, + /// type is the underlying infrastructure provider for the load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix", "OpenStack", and "VSphere". + #[serde(rename = "type")] + pub r#type: IngressControllerStatusEndpointPublishingStrategyLoadBalancerProviderParametersType, +} + +/// aws provides configuration settings that are specific to AWS load balancers. +/// If empty, defaults will be applied. See specific aws fields for details about their defaults. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusEndpointPublishingStrategyLoadBalancerProviderParametersAws { + /// classicLoadBalancerParameters holds configuration parameters for an AWS classic load balancer. Present only if type is Classic. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "classicLoadBalancer")] + pub classic_load_balancer: Option, + /// networkLoadBalancerParameters holds configuration parameters for an AWS network load balancer. Present only if type is NLB. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkLoadBalancer")] + pub network_load_balancer: Option, + /// type is the type of AWS load balancer to instantiate for an ingresscontroller. + /// Valid values are: + /// * "Classic": A Classic Load Balancer that makes routing decisions at either the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See the following for additional details: + /// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb + /// * "NLB": A Network Load Balancer that makes routing decisions at the transport layer (TCP/SSL). See the following for additional details: + /// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb + #[serde(rename = "type")] + pub r#type: IngressControllerStatusEndpointPublishingStrategyLoadBalancerProviderParametersAwsType, +} + +/// classicLoadBalancerParameters holds configuration parameters for an AWS classic load balancer. Present only if type is Classic. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusEndpointPublishingStrategyLoadBalancerProviderParametersAwsClassicLoadBalancer { + /// connectionIdleTimeout specifies the maximum time period that a connection may be idle before the load balancer closes the connection. The value must be parseable as a time duration value; see . A nil or zero value means no opinion, in which case a default value is used. The default value for this field is 60s. This default is subject to change. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "connectionIdleTimeout")] + pub connection_idle_timeout: Option, +} + +/// networkLoadBalancerParameters holds configuration parameters for an AWS network load balancer. Present only if type is NLB. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusEndpointPublishingStrategyLoadBalancerProviderParametersAwsNetworkLoadBalancer { +} + +/// aws provides configuration settings that are specific to AWS load balancers. +/// If empty, defaults will be applied. See specific aws fields for details about their defaults. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerStatusEndpointPublishingStrategyLoadBalancerProviderParametersAwsType { + Classic, + #[serde(rename = "NLB")] + Nlb, +} + +/// gcp provides configuration settings that are specific to GCP load balancers. +/// If empty, defaults will be applied. See specific gcp fields for details about their defaults. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusEndpointPublishingStrategyLoadBalancerProviderParametersGcp { + /// clientAccess describes how client access is restricted for internal load balancers. + /// Valid values are: * "Global": Specifying an internal load balancer with Global client access allows clients from any region within the VPC to communicate with the load balancer. + /// https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access + /// * "Local": Specifying an internal load balancer with Local client access means only clients within the same region (and VPC) as the GCP load balancer can communicate with the load balancer. Note that this is the default behavior. + /// https://cloud.google.com/load-balancing/docs/internal#client_access + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientAccess")] + pub client_access: Option, +} + +/// gcp provides configuration settings that are specific to GCP load balancers. +/// If empty, defaults will be applied. See specific gcp fields for details about their defaults. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerStatusEndpointPublishingStrategyLoadBalancerProviderParametersGcpClientAccess { + Global, + Local, +} + +/// ibm provides configuration settings that are specific to IBM Cloud load balancers. +/// If empty, defaults will be applied. See specific ibm fields for details about their defaults. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusEndpointPublishingStrategyLoadBalancerProviderParametersIbm { + /// protocol specifies whether the load balancer uses PROXY protocol to forward connections to the IngressController. See "service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features: "proxy-protocol"" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas" + /// PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. + /// Valid values for protocol are TCP, PROXY and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is TCP, without the proxy protocol enabled. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub protocol: Option, +} + +/// ibm provides configuration settings that are specific to IBM Cloud load balancers. +/// If empty, defaults will be applied. See specific ibm fields for details about their defaults. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerStatusEndpointPublishingStrategyLoadBalancerProviderParametersIbmProtocol { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "TCP")] + Tcp, + #[serde(rename = "PROXY")] + Proxy, +} + +/// providerParameters holds desired load balancer information specific to the underlying infrastructure provider. +/// If empty, defaults will be applied. See specific providerParameters fields for details about their defaults. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerStatusEndpointPublishingStrategyLoadBalancerProviderParametersType { + #[serde(rename = "AWS")] + Aws, + Azure, + BareMetal, + #[serde(rename = "GCP")] + Gcp, + Nutanix, + OpenStack, + VSphere, + #[serde(rename = "IBM")] + Ibm, +} + +/// loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerStatusEndpointPublishingStrategyLoadBalancerScope { + Internal, + External, +} + +/// nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusEndpointPublishingStrategyNodePort { + /// protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. + /// PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. 
Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. + /// The following values are valid for this field: + /// * The empty string. * "TCP". * "PROXY". + /// The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub protocol: Option, +} + +/// nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerStatusEndpointPublishingStrategyNodePortProtocol { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "TCP")] + Tcp, + #[serde(rename = "PROXY")] + Proxy, +} + +/// private holds parameters for the Private endpoint publishing strategy. Present only if type is Private. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusEndpointPublishingStrategyPrivate { + /// protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. + /// PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. 
See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. + /// The following values are valid for this field: + /// * The empty string. * "TCP". * "PROXY". + /// The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub protocol: Option, +} + +/// private holds parameters for the Private endpoint publishing strategy. Present only if type is Private. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerStatusEndpointPublishingStrategyPrivateProtocol { + #[serde(rename = "")] + KopiumEmpty, + #[serde(rename = "TCP")] + Tcp, + #[serde(rename = "PROXY")] + Proxy, +} + +/// endpointPublishingStrategy is the actual strategy in use. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerStatusEndpointPublishingStrategyType { + LoadBalancerService, + HostNetwork, + Private, + NodePortService, +} + +/// namespaceSelector is the actual namespaceSelector in use. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusNamespaceSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusNamespaceSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// routeSelector is the actual routeSelector in use. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusRouteSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusRouteSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// tlsProfile is the TLS connection configuration that is in effect. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct IngressControllerStatusTlsProfile { + /// ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml): + /// ciphers: - DES-CBC3-SHA + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ciphers: Option>, + /// minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): + /// minTLSVersion: TLSv1.1 + /// NOTE: currently the highest minTLSVersion allowed is VersionTLS12 + #[serde(default, skip_serializing_if = "Option::is_none", rename = "minTLSVersion")] + pub min_tls_version: Option, +} + +/// tlsProfile is the TLS connection configuration that is in effect. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum IngressControllerStatusTlsProfileMinTlsVersion { + #[serde(rename = "VersionTLS10")] + VersionTls10, + #[serde(rename = "VersionTLS11")] + VersionTls11, + #[serde(rename = "VersionTLS12")] + VersionTls12, + #[serde(rename = "VersionTLS13")] + VersionTls13, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/insightsoperators.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/insightsoperators.rs new file mode 100644 index 000000000..f47fbcbff --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/insightsoperators.rs @@ -0,0 +1,210 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/insightsoperators.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec is the specification of the desired behavior of the Insights. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "InsightsOperator", plural = "insightsoperators")] +#[kube(status = "InsightsOperatorStatus")] +#[kube(schema = "disabled")] +pub struct InsightsOperatorSpec { + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +/// spec is the specification of the desired behavior of the Insights. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InsightsOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// spec is the specification of the desired behavior of the Insights. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InsightsOperatorOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// status is the most recently observed status of the Insights operator. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InsightsOperatorStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// gatherStatus provides basic information about the last Insights data gathering. When omitted, this means no data gathering has taken place yet. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "gatherStatus")] + pub gather_status: Option, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// insightsReport provides general Insights analysis results. When omitted, this means no data gathering has taken place yet. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "insightsReport")] + pub insights_report: Option, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InsightsOperatorStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// gatherStatus provides basic information about the last Insights data gathering. When omitted, this means no data gathering has taken place yet. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InsightsOperatorStatusGatherStatus { + /// gatherers is a list of active gatherers (and their statuses) in the last gathering. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gatherers: Option>, + /// lastGatherDuration is the total time taken to process all gatherers during the last gather event. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGatherDuration")] + pub last_gather_duration: Option, + /// lastGatherTime is the last time when Insights data gathering finished. An empty value means that no data has been gathered yet. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGatherTime")] + pub last_gather_time: Option, +} + +/// gathererStatus represents information about a particular data gatherer. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InsightsOperatorStatusGatherStatusGatherers { + /// conditions provide details on the status of each gatherer. + pub conditions: Vec, + /// lastGatherDuration represents the time spent gathering. + #[serde(rename = "lastGatherDuration")] + pub last_gather_duration: String, + /// name is the name of the gatherer. 
+ pub name: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InsightsOperatorStatusGatherStatusGatherersConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. + pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. 
+ pub reason: String, + /// status of the condition, one of True, False, Unknown. + pub status: InsightsOperatorStatusGatherStatusGatherersConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InsightsOperatorStatusGatherStatusGatherersConditionsStatus { + True, + False, + Unknown, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InsightsOperatorStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + +/// insightsReport provides general Insights analysis results. When omitted, this means no data gathering has taken place yet. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InsightsOperatorStatusInsightsReport { + /// downloadedAt is the time when the last Insights report was downloaded. An empty value means that there has not been any Insights report downloaded yet and it usually appears in disconnected clusters (or clusters when the Insights data gathering is disabled). + #[serde(default, skip_serializing_if = "Option::is_none", rename = "downloadedAt")] + pub downloaded_at: Option, + /// healthChecks provides basic information about active Insights health checks in a cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "healthChecks")] + pub health_checks: Option>, +} + +/// healthCheck represents an Insights health check attributes. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct InsightsOperatorStatusInsightsReportHealthChecks { + /// advisorURI provides the URL link to the Insights Advisor. + #[serde(rename = "advisorURI")] + pub advisor_uri: String, + /// description provides basic description of the healtcheck. + pub description: String, + /// state determines what the current state of the health check is. Health check is enabled by default and can be disabled by the user in the Insights advisor user interface. + pub state: InsightsOperatorStatusInsightsReportHealthChecksState, + /// totalRisk of the healthcheck. Indicator of the total risk posed by the detected issue; combination of impact and likelihood. The values can be from 1 to 4, and the higher the number, the more important the issue. + #[serde(rename = "totalRisk")] + pub total_risk: i32, +} + +/// healthCheck represents an Insights health check attributes. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum InsightsOperatorStatusInsightsReportHealthChecksState { + Enabled, + Disabled, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/kubeapiservers.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/kubeapiservers.rs new file mode 100644 index 000000000..ba4a58e44 --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/kubeapiservers.rs @@ -0,0 +1,176 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/kubeapiservers.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec is the specification of the desired behavior of the Kubernetes API Server +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = 
"KubeAPIServer", plural = "kubeapiservers")] +#[kube(status = "KubeAPIServerStatus")] +#[kube(schema = "disabled")] +pub struct KubeAPIServerSpec { + /// failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failedRevisionLimit")] + pub failed_revision_limit: Option, + /// forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "forceRedeploymentReason")] + pub force_redeployment_reason: Option, + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. 
+ /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + #[serde(default, skip_serializing_if = "Option::is_none", rename = "succeededRevisionLimit")] + pub succeeded_revision_limit: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +/// spec is the specification of the desired behavior of the Kubernetes API Server +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum KubeAPIServerLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// spec is the specification of the desired behavior of the Kubernetes API Server +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum KubeAPIServerOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// status is the most recently observed status of the Kubernetes API Server +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeAPIServerStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be 
reconciled or has changed in a way that needs a reaction. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// latestAvailableRevision is the deploymentID of the most recent deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestAvailableRevision")] + pub latest_available_revision: Option, + /// latestAvailableRevisionReason describe the detailed reason for the most recent deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestAvailableRevisionReason")] + pub latest_available_revision_reason: Option, + /// nodeStatuses track the deployment values and errors across individual nodes + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeStatuses")] + pub node_statuses: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// serviceAccountIssuers tracks history of used service account issuers. The item without expiration time represents the currently used service account issuer. The other items represents service account issuers that were used previously and are still being trusted. The default expiration for the items is set by the platform and it defaults to 24h. 
see: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceAccountIssuers")] + pub service_account_issuers: Option>, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeAPIServerStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeAPIServerStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + +/// NodeStatus provides information about the current state of a particular node managed by this operator. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeAPIServerStatusNodeStatuses { + /// currentRevision is the generation of the most recently successful deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "currentRevision")] + pub current_revision: Option, + /// lastFailedCount is how often the installer pod of the last failed revision failed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedCount")] + pub last_failed_count: Option, + /// lastFailedReason is a machine readable failure reason string. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedReason")] + pub last_failed_reason: Option, + /// lastFailedRevision is the generation of the deployment we tried and failed to deploy. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedRevision")] + pub last_failed_revision: Option, + /// lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedRevisionErrors")] + pub last_failed_revision_errors: Option>, + /// lastFailedTime is the time the last failed revision failed the last time. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedTime")] + pub last_failed_time: Option, + /// lastFallbackCount is how often a fallback to a previous revision happened. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFallbackCount")] + pub last_fallback_count: Option, + /// nodeName is the name of the node + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeName")] + pub node_name: Option, + /// targetRevision is the generation of the deployment we're trying to apply + #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetRevision")] + pub target_revision: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeAPIServerStatusServiceAccountIssuers { + /// expirationTime is the time after which this service account issuer will be pruned and removed from the trusted list of service account issuers. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "expirationTime")] + pub expiration_time: Option, + /// name is the name of the service account issuer --- + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/kubecontrollermanagers.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/kubecontrollermanagers.rs new file mode 100644 index 000000000..8919ff747 --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/kubecontrollermanagers.rs @@ -0,0 +1,166 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/kubecontrollermanagers.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec is the specification of the desired behavior of the Kubernetes Controller Manager +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "KubeControllerManager", plural = "kubecontrollermanagers")] +#[kube(status = "KubeControllerManagerStatus")] +#[kube(schema = "disabled")] +pub struct KubeControllerManagerSpec { + /// failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failedRevisionLimit")] + pub failed_revision_limit: Option, + /// forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "forceRedeploymentReason")] + pub force_redeployment_reason: Option, + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + #[serde(default, skip_serializing_if = "Option::is_none", rename = "succeededRevisionLimit")] + pub succeeded_revision_limit: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. 
Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, + /// useMoreSecureServiceCA indicates that the service-ca.crt provided in SA token volumes should include only enough certificates to validate service serving certificates. Once set to true, it cannot be set to false. Even if someone finds a way to set it back to false, the service-ca.crt files that previously existed will only have the more secure content. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "useMoreSecureServiceCA")] + pub use_more_secure_service_ca: Option, +} + +/// spec is the specification of the desired behavior of the Kubernetes Controller Manager +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum KubeControllerManagerLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// spec is the specification of the desired behavior of the Kubernetes Controller Manager +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum KubeControllerManagerOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// status is the most recently observed status of the Kubernetes Controller Manager +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeControllerManagerStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// latestAvailableRevision is the deploymentID of the most recent deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestAvailableRevision")] + pub latest_available_revision: Option, + /// latestAvailableRevisionReason describe the detailed reason for the most recent deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestAvailableRevisionReason")] + pub latest_available_revision_reason: Option, + /// nodeStatuses track the deployment values and errors across individual nodes + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeStatuses")] + pub node_statuses: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeControllerManagerStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeControllerManagerStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + +/// NodeStatus provides information about the current state of a particular node managed by this operator. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeControllerManagerStatusNodeStatuses { + /// currentRevision is the generation of the most recently successful deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "currentRevision")] + pub current_revision: Option, + /// lastFailedCount is how often the installer pod of the last failed revision failed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedCount")] + pub last_failed_count: Option, + /// lastFailedReason is a machine readable failure reason string. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedReason")] + pub last_failed_reason: Option, + /// lastFailedRevision is the generation of the deployment we tried and failed to deploy. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedRevision")] + pub last_failed_revision: Option, + /// lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedRevisionErrors")] + pub last_failed_revision_errors: Option>, + /// lastFailedTime is the time the last failed revision failed the last time. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedTime")] + pub last_failed_time: Option, + /// lastFallbackCount is how often a fallback to a previous revision happened. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFallbackCount")] + pub last_fallback_count: Option, + /// nodeName is the name of the node + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeName")] + pub node_name: Option, + /// targetRevision is the generation of the deployment we're trying to apply + #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetRevision")] + pub target_revision: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/kubeschedulers.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/kubeschedulers.rs new file mode 100644 index 000000000..1c4d13a63 --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/kubeschedulers.rs @@ -0,0 +1,163 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/kubeschedulers.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec is the specification of the desired behavior of the Kubernetes Scheduler +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "KubeScheduler", plural = "kubeschedulers")] +#[kube(status = "KubeSchedulerStatus")] +#[kube(schema = "disabled")] +pub struct KubeSchedulerSpec { + /// failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failedRevisionLimit")] + pub failed_revision_limit: Option, + /// forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. 
This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "forceRedeploymentReason")] + pub force_redeployment_reason: Option, + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + #[serde(default, skip_serializing_if = "Option::is_none", rename = "succeededRevisionLimit")] + pub succeeded_revision_limit: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +/// spec is the specification of the desired behavior of the Kubernetes Scheduler +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum KubeSchedulerLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// spec is the specification of the desired behavior of the Kubernetes Scheduler +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum KubeSchedulerOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// status is the most recently observed status of the Kubernetes Scheduler +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeSchedulerStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// latestAvailableRevision is the deploymentID of the most recent deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestAvailableRevision")] + pub latest_available_revision: Option, + /// latestAvailableRevisionReason describe the detailed reason for the most recent deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestAvailableRevisionReason")] + pub latest_available_revision_reason: Option, + /// nodeStatuses track the deployment values and errors across individual nodes + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeStatuses")] + pub node_statuses: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeSchedulerStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeSchedulerStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + +/// NodeStatus provides information about the current state of a particular node managed by this operator. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeSchedulerStatusNodeStatuses { + /// currentRevision is the generation of the most recently successful deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "currentRevision")] + pub current_revision: Option, + /// lastFailedCount is how often the installer pod of the last failed revision failed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedCount")] + pub last_failed_count: Option, + /// lastFailedReason is a machine readable failure reason string. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedReason")] + pub last_failed_reason: Option, + /// lastFailedRevision is the generation of the deployment we tried and failed to deploy. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedRevision")] + pub last_failed_revision: Option, + /// lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedRevisionErrors")] + pub last_failed_revision_errors: Option>, + /// lastFailedTime is the time the last failed revision failed the last time. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedTime")] + pub last_failed_time: Option, + /// lastFallbackCount is how often a fallback to a previous revision happened. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFallbackCount")] + pub last_fallback_count: Option, + /// nodeName is the name of the node + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeName")] + pub node_name: Option, + /// targetRevision is the generation of the deployment we're trying to apply + #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetRevision")] + pub target_revision: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/kubestorageversionmigrators.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/kubestorageversionmigrators.rs new file mode 100644 index 000000000..5cdca2ccd --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/kubestorageversionmigrators.rs @@ -0,0 +1,109 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/kubestorageversionmigrators.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "KubeStorageVersionMigrator", plural = "kubestorageversionmigrators")] +#[kube(status = "KubeStorageVersionMigratorStatus")] +#[kube(schema = "disabled")] +pub struct KubeStorageVersionMigratorSpec { + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum KubeStorageVersionMigratorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum KubeStorageVersionMigratorOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeStorageVersionMigratorStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeStorageVersionMigratorStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct KubeStorageVersionMigratorStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/machineconfigurations.rs 
b/kube-custom-resources-rs/src/operator_openshift_io/v1/machineconfigurations.rs new file mode 100644 index 000000000..25dbba21b --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/machineconfigurations.rs @@ -0,0 +1,163 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/machineconfigurations.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec is the specification of the desired behavior of the Machine Config Operator +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "MachineConfiguration", plural = "machineconfigurations")] +#[kube(status = "MachineConfigurationStatus")] +#[kube(schema = "disabled")] +pub struct MachineConfigurationSpec { + /// failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failedRevisionLimit")] + pub failed_revision_limit: Option, + /// forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "forceRedeploymentReason")] + pub force_redeployment_reason: Option, + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. 
+ /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default) + #[serde(default, skip_serializing_if = "Option::is_none", rename = "succeededRevisionLimit")] + pub succeeded_revision_limit: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +/// spec is the specification of the desired behavior of the Machine Config Operator +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum MachineConfigurationLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// spec is the specification of the desired behavior of the Machine Config Operator +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum MachineConfigurationOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// status is the most recently observed status of the Machine Config Operator +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineConfigurationStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// latestAvailableRevision is the deploymentID of the most recent deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestAvailableRevision")] + pub latest_available_revision: Option, + /// latestAvailableRevisionReason describe the detailed reason for the most recent deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestAvailableRevisionReason")] + pub latest_available_revision_reason: Option, + /// nodeStatuses track the deployment values and errors across individual nodes + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeStatuses")] + pub node_statuses: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineConfigurationStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineConfigurationStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + +/// NodeStatus provides information about the current state of a particular node managed by this operator. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineConfigurationStatusNodeStatuses { + /// currentRevision is the generation of the most recently successful deployment + #[serde(default, skip_serializing_if = "Option::is_none", rename = "currentRevision")] + pub current_revision: Option, + /// lastFailedCount is how often the installer pod of the last failed revision failed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedCount")] + pub last_failed_count: Option, + /// lastFailedReason is a machine readable failure reason string. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedReason")] + pub last_failed_reason: Option, + /// lastFailedRevision is the generation of the deployment we tried and failed to deploy. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedRevision")] + pub last_failed_revision: Option, + /// lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedRevisionErrors")] + pub last_failed_revision_errors: Option>, + /// lastFailedTime is the time the last failed revision failed the last time. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFailedTime")] + pub last_failed_time: Option, + /// lastFallbackCount is how often a fallback to a previous revision happened. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastFallbackCount")] + pub last_fallback_count: Option, + /// nodeName is the name of the node + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeName")] + pub node_name: Option, + /// targetRevision is the generation of the deployment we're trying to apply + #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetRevision")] + pub target_revision: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/mod.rs new file mode 100644 index 000000000..c27ab0753 --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/mod.rs @@ -0,0 +1,20 @@ +pub mod authentications; +pub mod cloudcredentials; +pub mod clustercsidrivers; +pub mod configs; +pub mod consoles; +pub mod csisnapshotcontrollers; +pub mod dnses; +pub mod etcds; +pub mod ingresscontrollers; +pub mod insightsoperators; +pub mod kubeapiservers; +pub mod kubecontrollermanagers; +pub mod kubeschedulers; +pub mod kubestorageversionmigrators; +pub mod machineconfigurations; +pub mod networks; +pub mod openshiftapiservers; +pub mod openshiftcontrollermanagers; +pub mod servicecas; +pub mod storages; diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/networks.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/networks.rs new file mode 100644 index 000000000..3837dfde4 --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/networks.rs @@ -0,0 +1,549 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/networks.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// NetworkSpec is the top-level network configuration object. 
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "Network", plural = "networks")] +#[kube(schema = "disabled")] +pub struct NetworkSpec { + /// additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "additionalNetworks")] + pub additional_networks: Option>, + /// clusterNetwork is the IP address pool to use for pod IPs. Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. Others only support one. This is equivalent to the cluster-cidr. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clusterNetwork")] + pub cluster_network: Option>, + /// defaultNetwork is the "default" network that all pods will receive + #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultNetwork")] + pub default_network: Option, + /// deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when OpenShift SDN and ovn-kubernetes are used and true otherwise. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "deployKubeProxy")] + pub deploy_kube_proxy: Option, + /// disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "disableMultiNetwork")] + pub disable_multi_network: Option, + /// disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck CRs from a test pod to every node, apiserver and LB should be disabled or not. 
If unset, this property defaults to 'false' and network diagnostics is enabled. Setting this to 'true' would reduce the additional load of the pods performing the checks. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "disableNetworkDiagnostics")] + pub disable_network_diagnostics: Option, + /// exportNetworkFlows enables and configures the export of network flow metadata from the pod network by using protocols NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes plugin. If unset, flows will not be exported to any collector. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "exportNetworkFlows")] + pub export_network_flows: Option, + /// kubeProxyConfig lets us configure desired proxy configuration. If not specified, sensible defaults will be chosen by OpenShift directly. Not consumed by all network providers - currently only openshift-sdn. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "kubeProxyConfig")] + pub kube_proxy_config: Option, + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// migration enables and configures the cluster network migration. The migration procedure allows to change the network type and the MTU. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub migration: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. 
It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// serviceNetwork is the ip address pool to use for Service IPs Currently, all existing network providers only support a single value here, but this is an array to allow for growth. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceNetwork")] + pub service_network: Option>, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, + /// useMultiNetworkPolicy enables a controller which allows for MultiNetworkPolicy objects to be used on additional networks as created by Multus CNI. MultiNetworkPolicy are similar to NetworkPolicy objects, but NetworkPolicy objects only apply to the primary interface. With MultiNetworkPolicy, you can control the traffic that a pod can receive over the secondary interfaces. If unset, this property defaults to 'false' and MultiNetworkPolicy objects are ignored. 
If 'disableMultiNetwork' is 'true' then the value of this field is ignored. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "useMultiNetworkPolicy")] + pub use_multi_network_policy: Option, +} + +/// AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one "Config" that matches the type. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkAdditionalNetworks { + /// name is the name of the network. This will be populated in the resulting CRD This must be unique. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is the namespace of the network. This will be populated in the resulting CRD If not given the network will be created in the default namespace. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD + #[serde(default, skip_serializing_if = "Option::is_none", rename = "rawCNIConfig")] + pub raw_cni_config: Option, + /// SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan + #[serde(default, skip_serializing_if = "Option::is_none", rename = "simpleMacvlanConfig")] + pub simple_macvlan_config: Option, + /// type is the type of network The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkAdditionalNetworksSimpleMacvlanConfig { + /// IPAMConfig configures IPAM module will be used for IP Address Management (IPAM). 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipamConfig")] + pub ipam_config: Option, + /// master is the host interface to create the macvlan interface from. If not specified, it will be default route interface + #[serde(default, skip_serializing_if = "Option::is_none")] + pub master: Option, + /// mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mode: Option, + /// mtu is the mtu to use for the macvlan interface. if unset, host's kernel will select the value. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mtu: Option, +} + +/// IPAMConfig configures IPAM module will be used for IP Address Management (IPAM). +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkAdditionalNetworksSimpleMacvlanConfigIpamConfig { + /// StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic + #[serde(default, skip_serializing_if = "Option::is_none", rename = "staticIPAMConfig")] + pub static_ipam_config: Option, + /// Type is the type of IPAM module will be used for IP Address Management(IPAM). 
The supported values are IPAMTypeDHCP, IPAMTypeStatic + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkAdditionalNetworksSimpleMacvlanConfigIpamConfigStaticIpamConfig { + /// Addresses configures IP address for the interface + #[serde(default, skip_serializing_if = "Option::is_none")] + pub addresses: Option>, + /// DNS configures DNS for the interface + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dns: Option, + /// Routes configures IP routes for the interface + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routes: Option>, +} + +/// StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkAdditionalNetworksSimpleMacvlanConfigIpamConfigStaticIpamConfigAddresses { + /// Address is the IP address in CIDR format + #[serde(default, skip_serializing_if = "Option::is_none")] + pub address: Option, + /// Gateway is IP inside of subnet to designate as the gateway + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway: Option, +} + +/// DNS configures DNS for the interface +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkAdditionalNetworksSimpleMacvlanConfigIpamConfigStaticIpamConfigDns { + /// Domain configures the domainname the local domain used for short hostname lookups + #[serde(default, skip_serializing_if = "Option::is_none")] + pub domain: Option, + /// Nameservers points DNS servers for IP lookup + #[serde(default, skip_serializing_if = "Option::is_none")] + pub nameservers: Option>, + /// Search configures priority ordered search domains for short hostname lookups + #[serde(default, skip_serializing_if = "Option::is_none")] + pub 
search: Option>, +} + +/// StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkAdditionalNetworksSimpleMacvlanConfigIpamConfigStaticIpamConfigRoutes { + /// Destination points the IP route destination + #[serde(default, skip_serializing_if = "Option::is_none")] + pub destination: Option, + /// Gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI plugin). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway: Option, +} + +/// ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkClusterNetwork { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub cidr: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostPrefix")] + pub host_prefix: Option, +} + +/// defaultNetwork is the "default" network that all pods will receive +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkDefaultNetwork { + /// KuryrConfig configures the kuryr plugin + #[serde(default, skip_serializing_if = "Option::is_none", rename = "kuryrConfig")] + pub kuryr_config: Option, + /// openShiftSDNConfig configures the openshift-sdn plugin + #[serde(default, skip_serializing_if = "Option::is_none", rename = "openshiftSDNConfig")] + pub openshift_sdn_config: Option, + /// ovnKubernetesConfig configures the ovn-kubernetes plugin. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "ovnKubernetesConfig")] + pub ovn_kubernetes_config: Option, + /// type is the type of network All NetworkTypes are supported except for NetworkTypeRaw + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// KuryrConfig configures the kuryr plugin +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkDefaultNetworkKuryrConfig { + /// The port kuryr-controller will listen for readiness and liveness requests. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "controllerProbesPort")] + pub controller_probes_port: Option, + /// The port kuryr-daemon will listen for readiness and liveness requests. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "daemonProbesPort")] + pub daemon_probes_port: Option, + /// enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port pool with a minimum number of ports. Kuryr uses Neutron port pooling to fight the fact that it takes a significant amount of time to create one. It creates a number of ports when the first pod that is configured to use the dedicated network for pods is created in a namespace, and keeps them ready to be attached to pods. Port prepopulation is disabled by default. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "enablePortPoolsPrepopulation")] + pub enable_port_pools_prepopulation: Option, + /// mtu is the MTU that Kuryr should use when creating pod networks in Neutron. The value has to be lower or equal to the MTU of the nodes network and Neutron has to allow creation of tenant networks with such MTU. If unset Pod networks will be created with the same MTU as the nodes network has. This also affects the services network created by cluster-network-operator. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub mtu: Option, + /// openStackServiceNetwork contains the CIDR of network from which to allocate IPs for OpenStack Octavia's Amphora VMs. Please note that with Amphora driver Octavia uses two IPs from that network for each loadbalancer - one given by OpenShift and second for VRRP connections. As the first one is managed by OpenShift's and second by Neutron's IPAMs, those need to come from different pools. Therefore `openStackServiceNetwork` needs to be at least twice the size of `serviceNetwork`, and whole `serviceNetwork` must be overlapping with `openStackServiceNetwork`. cluster-network-operator will then make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that are not overlapping with `serviceNetwork`, effectivly preventing conflicts. If not set cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix size by 1. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "openStackServiceNetwork")] + pub open_stack_service_network: Option, + /// poolBatchPorts sets a number of ports that should be created in a single batch request to extend the port pool. The default is 3. For more information about port pools see enablePortPoolsPrepopulation setting. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "poolBatchPorts")] + pub pool_batch_ports: Option, + /// poolMaxPorts sets a maximum number of free ports that are being kept in a port pool. If the number of ports exceeds this setting, free ports will get deleted. Setting 0 will disable this upper bound, effectively preventing pools from shrinking and this is the default value. For more information about port pools see enablePortPoolsPrepopulation setting. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "poolMaxPorts")] + pub pool_max_ports: Option, + /// poolMinPorts sets a minimum number of free ports that should be kept in a port pool. 
If the number of ports is lower than this setting, new ports will get created and added to pool. The default is 1. For more information about port pools see enablePortPoolsPrepopulation setting. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "poolMinPorts")] + pub pool_min_ports: Option, +} + +/// openShiftSDNConfig configures the openshift-sdn plugin +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkDefaultNetworkOpenshiftSdnConfig { + /// enableUnidling controls whether or not the service proxy will support idling and unidling of services. By default, unidling is enabled. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableUnidling")] + pub enable_unidling: Option, + /// mode is one of "Multitenant", "Subnet", or "NetworkPolicy" + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mode: Option, + /// mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mtu: Option, + /// useExternalOpenvswitch used to control whether the operator would deploy an OVS DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always run as a system service, and this flag is ignored. DEPRECATED: non-functional as of 4.6 + #[serde(default, skip_serializing_if = "Option::is_none", rename = "useExternalOpenvswitch")] + pub use_external_openvswitch: Option, + /// vxlanPort is the port to use for all vxlan packets. The default is 4789. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "vxlanPort")] + pub vxlan_port: Option, +} + +/// ovnKubernetesConfig configures the ovn-kubernetes plugin. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkDefaultNetworkOvnKubernetesConfig { + /// egressIPConfig holds the configuration for EgressIP options. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "egressIPConfig")] + pub egress_ip_config: Option, + /// gatewayConfig holds the configuration for node gateway options. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "gatewayConfig")] + pub gateway_config: Option, + /// geneve port is the UDP port to be used by geneve encapulation. Default is 6081 + #[serde(default, skip_serializing_if = "Option::is_none", rename = "genevePort")] + pub geneve_port: Option, + /// HybridOverlayConfig configures an additional overlay network for peers that are not using OVN. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "hybridOverlayConfig")] + pub hybrid_overlay_config: Option, + /// ipsecConfig enables and configures IPsec for pods on the pod network within the cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipsecConfig")] + pub ipsec_config: Option, + /// mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400 + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mtu: Option, + /// policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "policyAuditConfig")] + pub policy_audit_config: Option, + /// v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. 
Default is 100.64.0.0/16 + #[serde(default, skip_serializing_if = "Option::is_none", rename = "v4InternalSubnet")] + pub v4_internal_subnet: Option, + /// v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is fd98::/48 + #[serde(default, skip_serializing_if = "Option::is_none", rename = "v6InternalSubnet")] + pub v6_internal_subnet: Option, +} + +/// egressIPConfig holds the configuration for EgressIP options. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkDefaultNetworkOvnKubernetesConfigEgressIpConfig { + /// reachabilityTotalTimeout configures the EgressIP node reachability check total timeout in seconds. If the EgressIP node cannot be reached within this timeout, the node is declared down. Setting a large value may cause the EgressIP feature to react slowly to node changes. In particular, it may react slowly for EgressIP nodes that really have a genuine problem and are unreachable. When omitted, this means the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 1 second. A value of 0 disables the EgressIP node's reachability check. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "reachabilityTotalTimeoutSeconds")] + pub reachability_total_timeout_seconds: Option, +} + +/// gatewayConfig holds the configuration for node gateway options. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkDefaultNetworkOvnKubernetesConfigGatewayConfig { + /// IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). 
By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to "Global". The supported values are "Restricted" and "Global". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipForwarding")] + pub ip_forwarding: Option, + /// ipv4 allows users to configure IP settings for IPv4 connections. When omitted, this means no opinion and the default configuration is used. Check individual members fields within ipv4 for details of default values. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ipv4: Option, + /// ipv6 allows users to configure IP settings for IPv6 connections. When omitted, this means no opinion and the default configuration is used. Check individual members fields within ipv6 for details of default values. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ipv6: Option, + /// RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "routingViaHost")] + pub routing_via_host: Option, +} + +/// ipv4 allows users to configure IP settings for IPv4 connections. When omitted, this means no opinion and the default configuration is used. Check individual members fields within ipv4 for details of default values. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkDefaultNetworkOvnKubernetesConfigGatewayConfigIpv4 { + /// internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.169.0/29 The value must be in proper IPV4 CIDR format + #[serde(default, skip_serializing_if = "Option::is_none", rename = "internalMasqueradeSubnet")] + pub internal_masquerade_subnet: Option, +} + +/// ipv6 allows users to configure IP settings for IPv6 connections. When omitted, this means no opinion and the default configuration is used. Check individual members fields within ipv6 for details of default values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkDefaultNetworkOvnKubernetesConfigGatewayConfigIpv6 { + /// internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /125). 
When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is fd69::/125 Note that IPV6 dual addresses are not permitted + #[serde(default, skip_serializing_if = "Option::is_none", rename = "internalMasqueradeSubnet")] + pub internal_masquerade_subnet: Option, +} + +/// HybridOverlayConfig configures an additional overlay network for peers that are not using OVN. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkDefaultNetworkOvnKubernetesConfigHybridOverlayConfig { + /// HybridClusterNetwork defines a network space given to nodes on an additional overlay network. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "hybridClusterNetwork")] + pub hybrid_cluster_network: Option>, + /// HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789 + #[serde(default, skip_serializing_if = "Option::is_none", rename = "hybridOverlayVXLANPort")] + pub hybrid_overlay_vxlan_port: Option, +} + +/// ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkDefaultNetworkOvnKubernetesConfigHybridOverlayConfigHybridClusterNetwork { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub cidr: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostPrefix")] + pub host_prefix: Option, +} + +/// ipsecConfig enables and configures IPsec for pods on the pod network within the cluster. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkDefaultNetworkOvnKubernetesConfigIpsecConfig { +} + +/// policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkDefaultNetworkOvnKubernetesConfigPolicyAuditConfig { + /// destination is the location for policy log messages. Regardless of this config, persistent logs will always be dumped to the host at /var/log/ovn/ however Additionally syslog output may be configured as follows. Valid values are: - "libc" -> to use the libc syslog() function of the host node's journdald process - "udp:host:port" -> for sending syslog over UDP - "unix:file" -> for using the UNIX domain socket directly - "null" -> to discard all messages logged to syslog The default is "null" + #[serde(default, skip_serializing_if = "Option::is_none")] + pub destination: Option, + /// maxFilesSize is the max size an ACL_audit log file is allowed to reach before rotation occurs Units are in MB and the Default is 50MB + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxFileSize")] + pub max_file_size: Option, + /// maxLogFiles specifies the maximum number of ACL_audit log files that can be present. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxLogFiles")] + pub max_log_files: Option, + /// rateLimit is the approximate maximum number of messages to generate per-second per-node. If unset the default of 20 msg/sec is used. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "rateLimit")] + pub rate_limit: Option, + /// syslogFacility the RFC5424 facility for generated messages, e.g. "kern". 
Default is "local0" + #[serde(default, skip_serializing_if = "Option::is_none", rename = "syslogFacility")] + pub syslog_facility: Option, +} + +/// exportNetworkFlows enables and configures the export of network flow metadata from the pod network by using protocols NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes plugin. If unset, flows will not be exported to any collector. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkExportNetworkFlows { + /// ipfix defines IPFIX configuration. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ipfix: Option, + /// netFlow defines the NetFlow configuration. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "netFlow")] + pub net_flow: Option, + /// sFlow defines the SFlow configuration. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "sFlow")] + pub s_flow: Option, +} + +/// ipfix defines IPFIX configuration. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkExportNetworkFlowsIpfix { + /// ipfixCollectors is list of strings formatted as ip:port with a maximum of ten items + #[serde(default, skip_serializing_if = "Option::is_none")] + pub collectors: Option>, +} + +/// netFlow defines the NetFlow configuration. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkExportNetworkFlowsNetFlow { + /// netFlow defines the NetFlow collectors that will consume the flow data exported from OVS. It is a list of strings formatted as ip:port with a maximum of ten items + #[serde(default, skip_serializing_if = "Option::is_none")] + pub collectors: Option>, +} + +/// sFlow defines the SFlow configuration. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkExportNetworkFlowsSFlow { + /// sFlowCollectors is list of strings formatted as ip:port with a maximum of ten items + #[serde(default, skip_serializing_if = "Option::is_none")] + pub collectors: Option>, +} + +/// kubeProxyConfig lets us configure desired proxy configuration. If not specified, sensible defaults will be chosen by OpenShift directly. Not consumed by all network providers - currently only openshift-sdn. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkKubeProxyConfig { + /// The address to "bind" on Defaults to 0.0.0.0 + #[serde(default, skip_serializing_if = "Option::is_none", rename = "bindAddress")] + pub bind_address: Option, + /// An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted in large clusters for performance reasons, but this is no longer necessary, and there is no reason to change this from the default value. Default: 30s + #[serde(default, skip_serializing_if = "Option::is_none", rename = "iptablesSyncPeriod")] + pub iptables_sync_period: Option, + /// Any additional arguments to pass to the kubeproxy process + #[serde(default, skip_serializing_if = "Option::is_none", rename = "proxyArguments")] + pub proxy_arguments: Option>, +} + +/// NetworkSpec is the top-level network configuration object. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum NetworkLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// migration enables and configures the cluster network migration. The migration procedure allows to change the network type and the MTU. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkMigration { + /// features contains the features migration configuration. Set this to migrate feature configuration when changing the cluster default network provider. 
if unset, the default operation is to migrate all the configuration of supported features. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub features: Option, + /// mtu contains the MTU migration configuration. Set this to allow changing the MTU values for the default network. If unset, the operation of changing the MTU for the default network will be rejected. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mtu: Option, + /// networkType is the target type of network migration. Set this to the target network type to allow changing the default network. If unset, the operation of changing cluster default network plugin will be rejected. The supported values are OpenShiftSDN, OVNKubernetes + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkType")] + pub network_type: Option, +} + +/// features contains the features migration configuration. Set this to migrate feature configuration when changing the cluster default network provider. if unset, the default operation is to migrate all the configuration of supported features. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkMigrationFeatures { + /// egressFirewall specifies whether or not the Egress Firewall configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and Egress Firewall configure is migrated. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "egressFirewall")] + pub egress_firewall: Option, + /// egressIP specifies whether or not the Egress IP configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and Egress IP configure is migrated. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "egressIP")] + pub egress_ip: Option, + /// multicast specifies whether or not the multicast configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and multicast configure is migrated. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub multicast: Option, +} + +/// mtu contains the MTU migration configuration. Set this to allow changing the MTU values for the default network. If unset, the operation of changing the MTU for the default network will be rejected. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkMigrationMtu { + /// machine contains MTU migration configuration for the machine's uplink. Needs to be migrated along with the default network MTU unless the current uplink MTU already accommodates the default network MTU. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub machine: Option, + /// network contains information about MTU migration for the default network. Migrations are only allowed to MTU values lower than the machine's uplink MTU by the minimum appropriate offset. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub network: Option, +} + +/// machine contains MTU migration configuration for the machine's uplink. Needs to be migrated along with the default network MTU unless the current uplink MTU already accommodates the default network MTU. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkMigrationMtuMachine { + /// from is the MTU to migrate from. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub from: Option, + /// to is the MTU to migrate to. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub to: Option, +} + +/// network contains information about MTU migration for the default network. 
Migrations are only allowed to MTU values lower than the machine's uplink MTU by the minimum appropriate offset. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkMigrationMtuNetwork { + /// from is the MTU to migrate from. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub from: Option, + /// to is the MTU to migrate to. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub to: Option, +} + +/// NetworkSpec is the top-level network configuration object. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum NetworkOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// NetworkStatus is detailed operator status, which is distilled up to the Network clusteroperator object. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct NetworkStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/openshiftapiservers.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/openshiftapiservers.rs new file mode 100644 index 
000000000..7fae419a1 --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/openshiftapiservers.rs @@ -0,0 +1,116 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/openshiftapiservers.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec is the specification of the desired behavior of the OpenShift API Server. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "OpenShiftAPIServer", plural = "openshiftapiservers")] +#[kube(status = "OpenShiftAPIServerStatus")] +#[kube(schema = "disabled")] +pub struct OpenShiftAPIServerSpec { + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +/// spec is the specification of the desired behavior of the OpenShift API Server. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum OpenShiftAPIServerLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// spec is the specification of the desired behavior of the OpenShift API Server. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum OpenShiftAPIServerOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// status defines the observed status of the OpenShift API Server. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OpenShiftAPIServerStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// latestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestAvailableRevision")] + pub latest_available_revision: Option, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OpenShiftAPIServerStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OpenShiftAPIServerStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/openshiftcontrollermanagers.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/openshiftcontrollermanagers.rs new file mode 100644 index 000000000..62fa8c85d --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/openshiftcontrollermanagers.rs @@ -0,0 +1,109 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/openshiftcontrollermanagers.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "OpenShiftControllerManager", plural = 
"openshiftcontrollermanagers")] +#[kube(status = "OpenShiftControllerManagerStatus")] +#[kube(schema = "disabled")] +pub struct OpenShiftControllerManagerSpec { + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum OpenShiftControllerManagerLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum OpenShiftControllerManagerOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OpenShiftControllerManagerStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OpenShiftControllerManagerStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct OpenShiftControllerManagerStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/servicecas.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/servicecas.rs new file mode 
100644 index 000000000..ab3d52d97 --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/servicecas.rs @@ -0,0 +1,113 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/servicecas.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "ServiceCA", plural = "servicecas")] +#[kube(status = "ServiceCAStatus")] +#[kube(schema = "disabled")] +pub struct ServiceCASpec { + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. 
+ /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, +} + +/// spec holds user settable values for configuration +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ServiceCALogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// spec holds user settable values for configuration +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ServiceCAOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ServiceCAStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ServiceCAStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ServiceCAStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + diff --git a/kube-custom-resources-rs/src/operator_openshift_io/v1/storages.rs b/kube-custom-resources-rs/src/operator_openshift_io/v1/storages.rs new file mode 100644 index 000000000..7b67c67a2 --- /dev/null +++ b/kube-custom-resources-rs/src/operator_openshift_io/v1/storages.rs @@ -0,0 +1,126 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/operator.openshift.io/v1/storages.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// spec holds user settable values for configuration +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "operator.openshift.io", version = "v1", kind = "Storage", plural = "storages")] +#[kube(status = "StorageStatus")] 
+#[kube(schema = "disabled")] +pub struct StorageSpec { + /// logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] + pub log_level: Option, + /// managementState indicates whether and how the operator should manage the component + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedConfig")] + pub observed_config: Option>, + /// operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. + /// Valid values are: "Normal", "Debug", "Trace", "TraceAll". Defaults to "Normal". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "operatorLogLevel")] + pub operator_log_level: Option, + /// unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "unsupportedConfigOverrides")] + pub unsupported_config_overrides: Option>, + /// VSphereStorageDriver indicates the storage driver to use on VSphere clusters. Once this field is set to CSIWithMigrationDriver, it can not be changed. If this is empty, the platform will choose a good default, which may change over time without notice. The current default is CSIWithMigrationDriver and may not be changed. DEPRECATED: This field will be removed in a future release. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "vsphereStorageDriver")] + pub vsphere_storage_driver: Option, +} + +/// spec holds user settable values for configuration +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum StorageLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// spec holds user settable values for configuration +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum StorageOperatorLogLevel { + #[serde(rename = "")] + KopiumEmpty, + Normal, + Debug, + Trace, + TraceAll, +} + +/// spec holds user settable values for configuration +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum StorageVsphereStorageDriver { + #[serde(rename = "")] + KopiumEmpty, + LegacyDeprecatedInTreeDriver, + #[serde(rename = "CSIWithMigrationDriver")] + CsiWithMigrationDriver, +} + +/// status holds observed values from the cluster. They may not be overridden. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct StorageStatus { + /// conditions is a list of conditions and their status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub generations: Option>, + /// observedGeneration is the last generation change you've dealt with + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// readyReplicas indicates how many replicas are ready and at the desired state + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// version is the level this availability applies to + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// OperatorCondition is just the standard condition fields. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct StorageStatusConditions { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub status: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct StorageStatusGenerations { + /// group is the group of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub group: Option, + /// hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// lastGeneration is the last generation of the workload controller involved + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastGeneration")] + pub last_generation: Option, + /// name is the name of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// namespace is where the thing you're tracking is + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// resource is the resource type of the thing you're tracking + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resource: Option, +} + diff --git a/kube-custom-resources-rs/src/platform_openshift_io/mod.rs b/kube-custom-resources-rs/src/platform_openshift_io/mod.rs new file mode 100644 index 000000000..32a5a9d4f --- /dev/null +++ b/kube-custom-resources-rs/src/platform_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1alpha1; diff --git a/kube-custom-resources-rs/src/platform_openshift_io/v1alpha1/mod.rs b/kube-custom-resources-rs/src/platform_openshift_io/v1alpha1/mod.rs new file mode 100644 index 000000000..a64dbc5e3 --- /dev/null +++ b/kube-custom-resources-rs/src/platform_openshift_io/v1alpha1/mod.rs @@ -0,0 +1 @@ +pub mod platformoperators; diff --git a/kube-custom-resources-rs/src/platform_openshift_io/v1alpha1/platformoperators.rs b/kube-custom-resources-rs/src/platform_openshift_io/v1alpha1/platformoperators.rs new file mode 100644 index 000000000..4ecf28b81 --- /dev/null +++ 
b/kube-custom-resources-rs/src/platform_openshift_io/v1alpha1/platformoperators.rs @@ -0,0 +1,76 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/platform.openshift.io/v1alpha1/platformoperators.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// PlatformOperatorSpec defines the desired state of PlatformOperator. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "platform.openshift.io", version = "v1alpha1", kind = "PlatformOperator", plural = "platformoperators")] +#[kube(status = "PlatformOperatorStatus")] +#[kube(schema = "disabled")] +pub struct PlatformOperatorSpec { + /// package contains the desired package and its configuration for this PlatformOperator. + pub package: PlatformOperatorPackage, +} + +/// package contains the desired package and its configuration for this PlatformOperator. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct PlatformOperatorPackage { + /// name contains the desired OLM-based Operator package name that is defined in an existing CatalogSource resource in the cluster. + /// This configured package will be managed with the cluster's lifecycle. In the current implementation, it will be retrieving this name from a list of supported operators out of the catalogs included with OpenShift. + /// --- + pub name: String, +} + +/// PlatformOperatorStatus defines the observed state of PlatformOperator +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct PlatformOperatorStatus { + /// activeBundleDeployment is the reference to the BundleDeployment resource that's being managed by this PO resource. If this field is not populated in the status then it means the PlatformOperator has either not been installed yet or is failing to install. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "activeBundleDeployment")] + pub active_bundle_deployment: Option, + /// conditions represent the latest available observations of a platform operator's current state. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, +} + +/// activeBundleDeployment is the reference to the BundleDeployment resource that's being managed by this PO resource. If this field is not populated in the status then it means the PlatformOperator has either not been installed yet or is failing to install. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct PlatformOperatorStatusActiveBundleDeployment { + /// name is the metadata.name of the referenced BundleDeployment object. + pub name: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct PlatformOperatorStatusConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. 
+ pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + pub reason: String, + /// status of the condition, one of True, False, Unknown. + pub status: PlatformOperatorStatusConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum PlatformOperatorStatusConditionsStatus { + True, + False, + Unknown, +} + diff --git a/kube-custom-resources-rs/src/quota_openshift_io/mod.rs b/kube-custom-resources-rs/src/quota_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/quota_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/quota_openshift_io/v1/clusterresourcequotas.rs b/kube-custom-resources-rs/src/quota_openshift_io/v1/clusterresourcequotas.rs new file mode 100644 index 000000000..f58265c0a --- /dev/null +++ b/kube-custom-resources-rs/src/quota_openshift_io/v1/clusterresourcequotas.rs @@ -0,0 +1,131 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/quota.openshift.io/v1/clusterresourcequotas.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; +use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; + +/// Spec defines the desired quota +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "quota.openshift.io", version = "v1", kind = "ClusterResourceQuota", plural = "clusterresourcequotas")] +#[kube(status = "ClusterResourceQuotaStatus")] +#[kube(schema = "disabled")] +pub struct ClusterResourceQuotaSpec { + /// Quota defines the desired quota + pub quota: ClusterResourceQuotaQuota, + /// Selector is the selector used to match projects. 
It should only select active projects on the scale of dozens (though it can select many more less active projects). These projects will contend on object creation through this resource. + pub selector: ClusterResourceQuotaSelector, +} + +/// Quota defines the desired quota +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceQuotaQuota { + /// hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hard: Option>, + /// scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "scopeSelector")] + pub scope_selector: Option, + /// A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub scopes: Option>, +} + +/// scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceQuotaQuotaScopeSelector { + /// A list of scope selector requirements by scope of the resources. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, +} + +/// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceQuotaQuotaScopeSelectorMatchExpressions { + /// Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. + pub operator: String, + /// The name of the scope that the selector applies to. + #[serde(rename = "scopeName")] + pub scope_name: String, + /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// Selector is the selector used to match projects. It should only select active projects on the scale of dozens (though it can select many more less active projects). These projects will contend on object creation through this resource. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceQuotaSelector { + /// AnnotationSelector is used to select projects by annotation. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// LabelSelector is used to select projects by label. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option, +} + +/// LabelSelector is used to select projects by label. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceQuotaSelectorLabels { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". 
The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceQuotaSelectorLabelsMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// Status defines the actual enforced quota and its current usage +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceQuotaStatus { + /// Namespaces slices the usage by project. This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects. This can be used to pull the deltas for a given project. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespaces: Option>, + /// Total defines the actual enforced quota and its current usage across all projects + pub total: ClusterResourceQuotaStatusTotal, +} + +/// ResourceQuotaStatusByNamespace gives status for a particular project +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceQuotaStatusNamespaces { + /// Namespace the project this status applies to + pub namespace: String, + /// Status indicates how many resources have been consumed by this project + pub status: ClusterResourceQuotaStatusNamespacesStatus, +} + +/// Status indicates how many resources have been consumed by this project +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceQuotaStatusNamespacesStatus { + /// Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hard: Option>, + /// Used is the current observed total usage of the resource in the namespace. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub used: Option>, +} + +/// Total defines the actual enforced quota and its current usage across all projects +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceQuotaStatusTotal { + /// Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hard: Option>, + /// Used is the current observed total usage of the resource in the namespace. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub used: Option>, +} + diff --git a/kube-custom-resources-rs/src/quota_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/quota_openshift_io/v1/mod.rs new file mode 100644 index 000000000..9ec4a757c --- /dev/null +++ b/kube-custom-resources-rs/src/quota_openshift_io/v1/mod.rs @@ -0,0 +1 @@ +pub mod clusterresourcequotas; diff --git a/kube-custom-resources-rs/src/route_openshift_io/mod.rs b/kube-custom-resources-rs/src/route_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/route_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/route_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/route_openshift_io/v1/mod.rs new file mode 100644 index 000000000..6a664abd8 --- /dev/null +++ b/kube-custom-resources-rs/src/route_openshift_io/v1/mod.rs @@ -0,0 +1 @@ +pub mod routes; diff --git a/kube-custom-resources-rs/src/route_openshift_io/v1/routes.rs b/kube-custom-resources-rs/src/route_openshift_io/v1/routes.rs new file mode 100644 index 000000000..5ac7b05bc --- /dev/null +++ b/kube-custom-resources-rs/src/route_openshift_io/v1/routes.rs @@ -0,0 +1,280 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/route.openshift.io/v1/routes.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; + +/// spec is the desired state of the route +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "route.openshift.io", version = "v1", kind = "Route", plural = "routes")] +#[kube(namespaced)] +#[kube(status = "RouteStatus")] +#[kube(schema = "disabled")] +pub struct RouteSpec { + /// alternateBackends allows up to 3 additional backends to be assigned to the route. 
Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "alternateBackends")] + pub alternate_backends: Option>, + /// host is an alias/DNS that points to the service. Optional. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub host: Option, + /// httpHeaders defines policy for HTTP headers. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpHeaders")] + pub http_headers: Option, + /// path that the router watches for, to route traffic for to the service. Optional + #[serde(default, skip_serializing_if = "Option::is_none")] + pub path: Option, + /// If specified, the port to be used by the router. Most routers will use all endpoints exposed by the service by default - set this value to instruct routers which port to use. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub port: Option, + /// subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). If host is set this field is ignored. An ingress controller may choose to ignore this suggested name, in which case the controller will report the assigned name in the status.ingress array or refuse to admit the route. If this value is set and the server does not support this field host will be populated automatically. Otherwise host is left empty. The field may have multiple parts separated by a dot, but not all ingress controllers may honor the request. This field may not be changed after creation except by a user with the update routes/custom-host permission. + /// Example: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub subdomain: Option, + /// The tls field provides the ability to configure certificates and termination for the route. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tls: Option, + /// to is an object the route should use as the primary backend. Only the Service kind is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) is set to zero, no traffic will be sent to this backend. + pub to: RouteTo, + /// Wildcard policy if any for the route. Currently only 'Subdomain' or 'None' is allowed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "wildcardPolicy")] + pub wildcard_policy: Option, +} + +/// RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RouteAlternateBackends { + /// The kind of target that the route is referring to. Currently, only 'Service' is allowed + pub kind: RouteAlternateBackendsKind, + /// name of the service/target that is being referred to. e.g. name of the service + pub name: String, + /// weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub weight: Option, +} + +/// RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum RouteAlternateBackendsKind { + Service, + #[serde(rename = "")] + KopiumEmpty, +} + +/// httpHeaders defines policy for HTTP headers. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RouteHttpHeaders { + /// actions specifies options for modifying headers and their values. Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController's spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route's spec.httpHeaders.actions field. The headers set via this API will not appear in access logs. Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub actions: Option, +} + +/// actions specifies options for modifying headers and their values. 
Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController's spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route's spec.httpHeaders.actions field. The headers set via this API will not appear in access logs. Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RouteHttpHeadersActions { + /// request is a list of HTTP request headers to modify. Currently, actions may define to either `Set` or `Delete` headers values. Actions defined here will modify the request headers of all requests made through a route. 
These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. Currently, actions may define to either `Set` or `Delete` headers values. Route actions will be executed after IngressController actions for request headers. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. You can use this field to specify HTTP request headers that should be set or deleted when forwarding connections from the client to your application. Sample fetchers allowed are "req.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". Any request header configuration applied directly via a Route resource using this API will override header configuration for a header of the same name applied via spec.httpHeaders.actions on the IngressController or route annotation. Note: This field cannot be used if your route uses TLS passthrough. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub request: Option>, + /// response is a list of HTTP response headers to modify. Currently, actions may define to either `Set` or `Delete` headers values. Actions defined here will modify the response headers of all requests made through a route. These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. Route actions will be executed before IngressController actions for response headers. Actions are applied in sequence as defined in this list. A maximum of 20 response header actions may be configured. You can use this field to specify HTTP response headers that should be set or deleted when forwarding responses from your application to the client. Sample fetchers allowed are "res.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". 
Note: This field cannot be used if your route uses TLS passthrough. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub response: Option>, +} + +/// RouteHTTPHeader specifies configuration for setting or deleting an HTTP header. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RouteHttpHeadersActionsRequest { + /// action specifies actions to perform on headers, such as setting or deleting headers. + pub action: RouteHttpHeadersActionsRequestAction, + /// name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&'*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique. + pub name: String, +} + +/// action specifies actions to perform on headers, such as setting or deleting headers. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RouteHttpHeadersActionsRequestAction { + /// set defines the HTTP header that should be set: added if it doesn't exist or replaced if it does. This field is required when type is Set and forbidden otherwise. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub set: Option, + /// type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + #[serde(rename = "type")] + pub r#type: RouteHttpHeadersActionsRequestActionType, +} + +/// set defines the HTTP header that should be set: added if it doesn't exist or replaced if it does. This field is required when type is Set and forbidden otherwise. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RouteHttpHeadersActionsRequestActionSet { + /// value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + pub value: String, +} + +/// action specifies actions to perform on headers, such as setting or deleting headers. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum RouteHttpHeadersActionsRequestActionType { + Set, + Delete, +} + +/// RouteHTTPHeader specifies configuration for setting or deleting an HTTP header. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RouteHttpHeadersActionsResponse { + /// action specifies actions to perform on headers, such as setting or deleting headers. + pub action: RouteHttpHeadersActionsResponseAction, + /// name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&'*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique. + pub name: String, +} + +/// action specifies actions to perform on headers, such as setting or deleting headers. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RouteHttpHeadersActionsResponseAction { + /// set defines the HTTP header that should be set: added if it doesn't exist or replaced if it does. This field is required when type is Set and forbidden otherwise. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub set: Option, + /// type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + #[serde(rename = "type")] + pub r#type: RouteHttpHeadersActionsResponseActionType, +} + +/// set defines the HTTP header that should be set: added if it doesn't exist or replaced if it does. This field is required when type is Set and forbidden otherwise. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RouteHttpHeadersActionsResponseActionSet { + /// value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + pub value: String, +} + +/// action specifies actions to perform on headers, such as setting or deleting headers. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum RouteHttpHeadersActionsResponseActionType { + Set, + Delete, +} + +/// If specified, the port to be used by the router. 
Most routers will use all endpoints exposed by the service by default - set this value to instruct routers which port to use. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RoutePort { + #[serde(rename = "targetPort")] + pub target_port: IntOrString, +} + +/// The tls field provides the ability to configure certificates and termination for the route. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RouteTls { + /// caCertificate provides the cert authority certificate contents + #[serde(default, skip_serializing_if = "Option::is_none", rename = "caCertificate")] + pub ca_certificate: Option, + /// certificate provides certificate contents. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub certificate: Option, + /// destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "destinationCACertificate")] + pub destination_ca_certificate: Option, + /// insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While each router may make its own decisions on which ports to expose, this is normally port 80. + /// * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only) (default). * None - no traffic is allowed on the insecure port. * Redirect - clients are redirected to the secure port. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "insecureEdgeTerminationPolicy")] + pub insecure_edge_termination_policy: Option, + /// key provides key file contents + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + /// termination indicates termination type. + /// * edge - TLS termination is done by the router and http is used to communicate with the backend (default) * passthrough - Traffic is sent straight to the destination without the router providing TLS termination * reencrypt - TLS termination is done by the router and https is used to communicate with the backend + /// Note: passthrough termination is incompatible with httpHeader actions + pub termination: RouteTlsTermination, +} + +/// The tls field provides the ability to configure certificates and termination for the route. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum RouteTlsInsecureEdgeTerminationPolicy { + Allow, + None, + Redirect, + #[serde(rename = "")] + KopiumEmpty, +} + +/// The tls field provides the ability to configure certificates and termination for the route. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum RouteTlsTermination { + #[serde(rename = "edge")] + Edge, + #[serde(rename = "reencrypt")] + Reencrypt, + #[serde(rename = "passthrough")] + Passthrough, +} + +/// to is an object the route should use as the primary backend. Only the Service kind is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) is set to zero, no traffic will be sent to this backend. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RouteTo { + /// The kind of target that the route is referring to. Currently, only 'Service' is allowed + pub kind: RouteToKind, + /// name of the service/target that is being referred to. e.g. 
name of the service + pub name: String, + /// weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub weight: Option, +} + +/// to is an object the route should use as the primary backend. Only the Service kind is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) is set to zero, no traffic will be sent to this backend. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum RouteToKind { + Service, + #[serde(rename = "")] + KopiumEmpty, +} + +/// spec is the desired state of the route +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum RouteWildcardPolicy { + None, + Subdomain, + #[serde(rename = "")] + KopiumEmpty, +} + +/// status is the current state of the route +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RouteStatus { + /// ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. Routes are considered live once they are `Ready` + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ingress: Option>, +} + +/// RouteIngress holds information about the places where a route is exposed. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RouteStatusIngress { + /// Conditions is the state of the route, may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// Host is the host string under which the route is exposed; this value is required + #[serde(default, skip_serializing_if = "Option::is_none")] + pub host: Option, + /// CanonicalHostname is the external host name for the router that can be used as a CNAME for the host requested for this route. This value is optional and may not be set in all cases. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "routerCanonicalHostname")] + pub router_canonical_hostname: Option, + /// Name is a name chosen by the router to identify itself; this value is required + #[serde(default, skip_serializing_if = "Option::is_none", rename = "routerName")] + pub router_name: Option, + /// Wildcard policy is the wildcard policy that was allowed where this route is exposed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "wildcardPolicy")] + pub wildcard_policy: Option, +} + +/// RouteIngressCondition contains details for the current condition of this route on a particular router. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct RouteStatusIngressConditions { + /// RFC 3339 date and time when this condition last transitioned + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// Human readable message indicating details about last transition. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// (brief) reason for the condition's last transition, and is usually a machine and human readable constant + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Status is the status of the condition. Can be True, False, Unknown. + pub status: String, + /// Type is the type of the condition. Currently only Admitted. 
+ #[serde(rename = "type")] + pub r#type: String, +} + diff --git a/kube-custom-resources-rs/src/samples_operator_openshift_io/mod.rs b/kube-custom-resources-rs/src/samples_operator_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/samples_operator_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/samples_operator_openshift_io/v1/configs.rs b/kube-custom-resources-rs/src/samples_operator_openshift_io/v1/configs.rs new file mode 100644 index 000000000..f276f32d8 --- /dev/null +++ b/kube-custom-resources-rs/src/samples_operator_openshift_io/v1/configs.rs @@ -0,0 +1,78 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/samples.operator.openshift.io/v1/configs.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// ConfigSpec contains the desired configuration and state for the Samples Operator, controlling various behavior around the imagestreams and templates it creates/updates in the openshift namespace. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "samples.operator.openshift.io", version = "v1", kind = "Config", plural = "configs")] +#[kube(status = "ConfigStatus")] +#[kube(schema = "disabled")] +pub struct ConfigSpec { + /// architectures determine which hardware architecture(s) to install, where x86_64, ppc64le, and s390x are the only supported choices currently. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub architectures: Option>, + /// managementState is top level on/off type of switch for all operators. When "Managed", this operator processes config and manipulates the samples accordingly. When "Unmanaged", this operator ignores any updates to the resources it watches. 
When "Removed", it reacts that same wasy as it does if the Config object is deleted, meaning any ImageStreams or Templates it manages (i.e. it honors the skipped lists) and the registry secret are deleted, along with the ConfigMap in the operator's namespace that represents the last config used to manipulate the samples, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// samplesRegistry allows for the specification of which registry is accessed by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library that are pulled into this github repository, but based on our pulling only ocp content it typically defaults to registry.redhat.io. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "samplesRegistry")] + pub samples_registry: Option, + /// skippedImagestreams specifies names of image streams that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "skippedImagestreams")] + pub skipped_imagestreams: Option>, + /// skippedTemplates specifies names of templates that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "skippedTemplates")] + pub skipped_templates: Option>, +} + +/// ConfigStatus contains the actual configuration in effect, as well as various details that describe the state of the Samples Operator. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatus { + /// architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the supported choices. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub architectures: Option>, + /// conditions represents the available maintenance status of the sample imagestreams and templates. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// managementState reflects the current operational status of the on/off switch for the operator. This operator compares the ManagementState as part of determining that we are turning the operator back on (i.e. "Managed") when it was previously "Unmanaged". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "managementState")] + pub management_state: Option, + /// samplesRegistry allows for the specification of which registry is accessed by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library that are pulled into this github repository, but based on our pulling only ocp content it typically defaults to registry.redhat.io. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "samplesRegistry")] + pub samples_registry: Option, + /// skippedImagestreams specifies names of image streams that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "skippedImagestreams")] + pub skipped_imagestreams: Option>, + /// skippedTemplates specifies names of templates that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. 
They will still have to manually delete the content but the operator will not recreate(or update) anything listed here. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "skippedTemplates")] + pub skipped_templates: Option>, + /// version is the value of the operator's payload based version indicator when it was last successfully processed + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// ConfigCondition captures various conditions of the Config as entries are processed. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ConfigStatusConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// lastUpdateTime is the last time this condition was updated. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastUpdateTime")] + pub last_update_time: Option, + /// message is a human readable message indicating details about the transition. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// reason is what caused the condition's last transition. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// status of the condition, one of True, False, Unknown. + pub status: String, + /// type of condition. 
+ #[serde(rename = "type")] + pub r#type: String, +} + diff --git a/kube-custom-resources-rs/src/samples_operator_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/samples_operator_openshift_io/v1/mod.rs new file mode 100644 index 000000000..3810d5b3c --- /dev/null +++ b/kube-custom-resources-rs/src/samples_operator_openshift_io/v1/mod.rs @@ -0,0 +1 @@ +pub mod configs; diff --git a/kube-custom-resources-rs/src/security_internal_openshift_io/mod.rs b/kube-custom-resources-rs/src/security_internal_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/security_internal_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/security_internal_openshift_io/v1/mod.rs b/kube-custom-resources-rs/src/security_internal_openshift_io/v1/mod.rs new file mode 100644 index 000000000..11cb8be4d --- /dev/null +++ b/kube-custom-resources-rs/src/security_internal_openshift_io/v1/mod.rs @@ -0,0 +1 @@ +pub mod rangeallocations; diff --git a/kube-custom-resources-rs/src/security_internal_openshift_io/v1/rangeallocations.rs b/kube-custom-resources-rs/src/security_internal_openshift_io/v1/rangeallocations.rs new file mode 100644 index 000000000..84345ec86 --- /dev/null +++ b/kube-custom-resources-rs/src/security_internal_openshift_io/v1/rangeallocations.rs @@ -0,0 +1,7 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/security.internal.openshift.io/v1/rangeallocations.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + + + + diff --git a/kube-custom-resources-rs/src/security_openshift_io/mod.rs b/kube-custom-resources-rs/src/security_openshift_io/mod.rs new file mode 100644 index 000000000..a3a6d96c3 --- /dev/null +++ b/kube-custom-resources-rs/src/security_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1; diff --git a/kube-custom-resources-rs/src/security_openshift_io/v1/mod.rs 
b/kube-custom-resources-rs/src/security_openshift_io/v1/mod.rs new file mode 100644 index 000000000..8f3af7ec7 --- /dev/null +++ b/kube-custom-resources-rs/src/security_openshift_io/v1/mod.rs @@ -0,0 +1 @@ +pub mod securitycontextconstraints; diff --git a/kube-custom-resources-rs/src/security_openshift_io/v1/securitycontextconstraints.rs b/kube-custom-resources-rs/src/security_openshift_io/v1/securitycontextconstraints.rs new file mode 100644 index 000000000..8475471b2 --- /dev/null +++ b/kube-custom-resources-rs/src/security_openshift_io/v1/securitycontextconstraints.rs @@ -0,0 +1,103 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/security.openshift.io/v1/securitycontextconstraints.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.2 + + +use serde::{Serialize, Deserialize}; + +/// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct SecurityContextConstraintsAllowedFlexVolumes { + /// Driver is the name of the Flexvolume driver. + pub driver: String, +} + +/// FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct SecurityContextConstraintsFsGroup { + /// Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ranges: Option>, + /// Type is the strategy that will dictate what FSGroup is used in the SecurityContext. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// IDRange provides a min/max of an allowed range of IDs. TODO: this could be reused for UIDs. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct SecurityContextConstraintsFsGroupRanges { + /// Max is the end of the range, inclusive. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub max: Option, + /// Min is the start of the range, inclusive. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub min: Option, +} + +/// RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct SecurityContextConstraintsRunAsUser { + /// Type is the strategy that will dictate what RunAsUser is used in the SecurityContext. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, + /// UID is the user id that containers must run as. Required for the MustRunAs strategy if not using namespace/service account allocated uids. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, + /// UIDRangeMax defines the max value for a strategy that allocates by range. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "uidRangeMax")] + pub uid_range_max: Option, + /// UIDRangeMin defines the min value for a strategy that allocates by range. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "uidRangeMin")] + pub uid_range_min: Option, +} + +/// SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct SecurityContextConstraintsSeLinuxContext { + /// seLinuxOptions required to run as; required for MustRunAs + #[serde(default, skip_serializing_if = "Option::is_none", rename = "seLinuxOptions")] + pub se_linux_options: Option, + /// Type is the strategy that will dictate what SELinux context is used in the SecurityContext. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// seLinuxOptions required to run as; required for MustRunAs +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct SecurityContextConstraintsSeLinuxContextSeLinuxOptions { + /// Level is SELinux level label that applies to the container. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub level: Option, + /// Role is a SELinux role label that applies to the container. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub role: Option, + /// Type is a SELinux type label that applies to the container. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, + /// User is a SELinux user label that applies to the container. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub user: Option, +} + +/// SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct SecurityContextConstraintsSupplementalGroups { + /// Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ranges: Option>, + /// Type is the strategy that will dictate what supplemental groups is used in the SecurityContext. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// IDRange provides a min/max of an allowed range of IDs. TODO: this could be reused for UIDs. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct SecurityContextConstraintsSupplementalGroupsRanges { + /// Max is the end of the range, inclusive. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub max: Option, + /// Min is the start of the range, inclusive. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub min: Option, +} + diff --git a/kube-custom-resources-rs/src/sharedresource_openshift_io/mod.rs b/kube-custom-resources-rs/src/sharedresource_openshift_io/mod.rs new file mode 100644 index 000000000..32a5a9d4f --- /dev/null +++ b/kube-custom-resources-rs/src/sharedresource_openshift_io/mod.rs @@ -0,0 +1 @@ +pub mod v1alpha1; diff --git a/kube-custom-resources-rs/src/sharedresource_openshift_io/v1alpha1/mod.rs b/kube-custom-resources-rs/src/sharedresource_openshift_io/v1alpha1/mod.rs new file mode 100644 index 000000000..0fc8d98ab --- /dev/null +++ b/kube-custom-resources-rs/src/sharedresource_openshift_io/v1alpha1/mod.rs @@ -0,0 +1,2 @@ +pub mod sharedconfigmaps; +pub mod sharedsecrets; diff --git a/kube-custom-resources-rs/src/sharedresource_openshift_io/v1alpha1/sharedconfigmaps.rs b/kube-custom-resources-rs/src/sharedresource_openshift_io/v1alpha1/sharedconfigmaps.rs new file mode 100644 index 000000000..0f4715c66 --- /dev/null +++ b/kube-custom-resources-rs/src/sharedresource_openshift_io/v1alpha1/sharedconfigmaps.rs @@ -0,0 +1,70 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/sharedresource.openshift.io/v1alpha1/sharedconfigmaps.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec is the specification of the desired shared configmap +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "sharedresource.openshift.io", version = "v1alpha1", kind = "SharedConfigMap", plural = "sharedconfigmaps")] +#[kube(status = "SharedConfigMapStatus")] +#[kube(schema = "disabled")] +pub struct SharedConfigMapSpec { + /// configMapRef is a reference to the ConfigMap to share + 
#[serde(rename = "configMapRef")] + pub config_map_ref: SharedConfigMapConfigMapRef, + /// description is a user readable explanation of what the backing resource provides. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub description: Option, +} + +/// configMapRef is a reference to the ConfigMap to share +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct SharedConfigMapConfigMapRef { + /// name represents the name of the ConfigMap that is being referenced. + pub name: String, + /// namespace represents the namespace where the referenced ConfigMap is located. + pub namespace: String, +} + +/// status is the observed status of the shared configmap +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct SharedConfigMapStatus { + /// conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct SharedConfigMapStatusConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. + pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + pub reason: String, + /// status of the condition, one of True, False, Unknown. + pub status: SharedConfigMapStatusConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum SharedConfigMapStatusConditionsStatus { + True, + False, + Unknown, +} + diff --git a/kube-custom-resources-rs/src/sharedresource_openshift_io/v1alpha1/sharedsecrets.rs b/kube-custom-resources-rs/src/sharedresource_openshift_io/v1alpha1/sharedsecrets.rs new file mode 100644 index 000000000..4cab37e10 --- /dev/null +++ b/kube-custom-resources-rs/src/sharedresource_openshift_io/v1alpha1/sharedsecrets.rs @@ -0,0 +1,70 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/openshift/api/sharedresource.openshift.io/v1alpha1/sharedsecrets.yaml --derive=PartialEq +// kopium version: 0.16.2 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// spec is the specification of the desired shared secret +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq)] +#[kube(group = "sharedresource.openshift.io", version = "v1alpha1", kind = "SharedSecret", plural = "sharedsecrets")] +#[kube(status = "SharedSecretStatus")] +#[kube(schema = "disabled")] +pub struct SharedSecretSpec { + /// description is a user readable explanation of what the backing resource provides. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub description: Option, + /// secretRef is a reference to the Secret to share + #[serde(rename = "secretRef")] + pub secret_ref: SharedSecretSecretRef, +} + +/// secretRef is a reference to the Secret to share +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct SharedSecretSecretRef { + /// name represents the name of the Secret that is being referenced. + pub name: String, + /// namespace represents the namespace where the referenced Secret is located. + pub namespace: String, +} + +/// status is the observed status of the shared secret +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct SharedSecretStatus { + /// conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct SharedSecretStatusConditions { + /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ #[serde(rename = "lastTransitionTime")] + pub last_transition_time: String, + /// message is a human readable message indicating details about the transition. This may be an empty string. + pub message: String, + /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + pub reason: String, + /// status of the condition, one of True, False, Unknown. + pub status: SharedSecretStatusConditionsStatus, + /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// // other fields } +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum SharedSecretStatusConditionsStatus { + True, + False, + Unknown, +} +